1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/netdevice.h>
9#include <linux/if_vlan.h>
10#include <net/ip.h>
11#include <linux/ipv6.h>
12#include <net/checksum.h>
13#include <linux/printk.h>
14
15#include "qlcnic.h"
16
17#define QLCNIC_TX_ETHER_PKT		0x01
18#define QLCNIC_TX_TCP_PKT		0x02
19#define QLCNIC_TX_UDP_PKT		0x03
20#define QLCNIC_TX_IP_PKT		0x04
21#define QLCNIC_TX_TCP_LSO		0x05
22#define QLCNIC_TX_TCP_LSO6		0x06
23#define QLCNIC_TX_ENCAP_PKT		0x07
24#define QLCNIC_TX_ENCAP_LSO		0x08
25#define QLCNIC_TX_TCPV6_PKT		0x0b
26#define QLCNIC_TX_UDPV6_PKT		0x0c
27
28#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
29#define QLCNIC_FLAGS_VLAN_OOB		0x40
30
31#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
32	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
33#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
34	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
35#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
36	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
37
38#define qlcnic_set_tx_port(_desc, _port) \
39	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
40
41#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
42	((_desc)->flags_opcode |= \
43	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
44
45#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
46	((_desc)->nfrags__length = \
47	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
48
49/* owner bits of status_desc */
50#define STATUS_OWNER_HOST	(0x1ULL << 56)
51#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
52
53/* Status descriptor:
54   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
55   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
56   53-55 desc_cnt, 56-57 owner, 58-63 opcode
57 */
58#define qlcnic_get_sts_port(sts_data)	\
59	((sts_data) & 0x0F)
60#define qlcnic_get_sts_status(sts_data)	\
61	(((sts_data) >> 4) & 0x0F)
62#define qlcnic_get_sts_type(sts_data)	\
63	(((sts_data) >> 8) & 0x0F)
64#define qlcnic_get_sts_totallength(sts_data)	\
65	(((sts_data) >> 12) & 0xFFFF)
66#define qlcnic_get_sts_refhandle(sts_data)	\
67	(((sts_data) >> 28) & 0xFFFF)
68#define qlcnic_get_sts_prot(sts_data)	\
69	(((sts_data) >> 44) & 0x0F)
70#define qlcnic_get_sts_pkt_offset(sts_data)	\
71	(((sts_data) >> 48) & 0x1F)
72#define qlcnic_get_sts_desc_cnt(sts_data)	\
73	(((sts_data) >> 53) & 0x7)
74#define qlcnic_get_sts_opcode(sts_data)	\
75	(((sts_data) >> 58) & 0x03F)
76
77#define qlcnic_get_lro_sts_refhandle(sts_data) 	\
78	((sts_data) & 0x07FFF)
79#define qlcnic_get_lro_sts_length(sts_data)	\
80	(((sts_data) >> 16) & 0x0FFFF)
81#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
82	(((sts_data) >> 32) & 0x0FF)
83#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
84	(((sts_data) >> 40) & 0x0FF)
85#define qlcnic_get_lro_sts_timestamp(sts_data)	\
86	(((sts_data) >> 48) & 0x1)
87#define qlcnic_get_lro_sts_type(sts_data)	\
88	(((sts_data) >> 49) & 0x7)
89#define qlcnic_get_lro_sts_push_flag(sts_data)		\
90	(((sts_data) >> 52) & 0x1)
91#define qlcnic_get_lro_sts_seq_number(sts_data)		\
92	((sts_data) & 0x0FFFFFFFF)
93#define qlcnic_get_lro_sts_mss(sts_data1)		\
94	((sts_data1 >> 32) & 0x0FFFF)
95
96#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
97
98/* opcode field in status_desc */
99#define QLCNIC_SYN_OFFLOAD	0x03
100#define QLCNIC_RXPKT_DESC  	0x04
101#define QLCNIC_OLD_RXPKT_DESC	0x3f
102#define QLCNIC_RESPONSE_DESC	0x05
103#define QLCNIC_LRO_DESC  	0x12
104
105#define QLCNIC_TX_POLL_BUDGET		128
106#define QLCNIC_TCP_HDR_SIZE		20
107#define QLCNIC_TCP_TS_OPTION_SIZE	12
108#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
109#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)
110
111#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
112
113/* for status field in status_desc */
114#define STATUS_CKSUM_LOOP	0
115#define STATUS_CKSUM_OK		2
116
117#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
118#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
119#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
120#define qlcnic_83xx_opcode(sts)	((sts >> 42) & 0xF)
121#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
122#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
123#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
124#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
125#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
126#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
127#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
128#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
129#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
130
131static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
132				   int max);
133
134static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
135					    struct qlcnic_host_rds_ring *,
136					    u16, u16);
137
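/* Hash a MAC/VLAN pair down to one byte for indexing the filter tables */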
138static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
139{
140	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
141}
142
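/* On 83xx adapters the SDS ring id is folded into the upper bits of the
 * buffer reference handle; 82xx adapters use the handle unchanged.
 */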
143static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
144					u16 handle, u8 ring_id)
145{
146	if (qlcnic_83xx_check(adapter))
147		return handle | (ring_id << 15);
148	else
149		return handle;
150}
151
152static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
153{
154	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
155}
156
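/* Drop a MAC learned on the Rx side: the address is first re-programmed
 * and then deleted in the firmware filter table, and the host-side node
 * is unlinked only if the delete succeeds.
 */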
157static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
158				      struct qlcnic_filter *fil,
159				      void *addr, u16 vlan_id)
160{
161	int ret;
162	u8 op;
163
164	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
165	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
166	if (ret)
167		return;
168
169	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
170	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
171	if (!ret) {
172		hlist_del(&fil->fnode);
173		adapter->rx_fhash.fnum--;
174	}
175}
176
177static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
178						    void *addr, u16 vlan_id)
179{
180	struct qlcnic_filter *tmp_fil = NULL;
181	struct hlist_node *n;
182
183	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
184		if (ether_addr_equal(tmp_fil->faddr, addr) &&
185		    tmp_fil->vlan_id == vlan_id)
186			return tmp_fil;
187	}
188
189	return NULL;
190}
191
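/* Rx-side MAC learning used with SR-IOV: remember source MACs seen in
 * loopback traffic and drop the corresponding filters again once the
 * same address shows up in regular (non-loopback) traffic.
 */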
192static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
193				 struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
194{
195	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
196	struct qlcnic_filter *fil, *tmp_fil;
197	struct hlist_head *head;
198	unsigned long time;
199	u64 src_addr = 0;
200	u8 hindex, op;
201	int ret;
202
203	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
204		vlan_id = 0;
205
206	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
207	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
208		 (adapter->fhash.fbucket_size - 1);
209
210	if (loopback_pkt) {
211		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
212			return;
213
214		head = &(adapter->rx_fhash.fhead[hindex]);
215
216		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
217		if (tmp_fil) {
218			time = tmp_fil->ftime;
219			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
220				tmp_fil->ftime = jiffies;
221			return;
222		}
223
224		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
225		if (!fil)
226			return;
227
228		fil->ftime = jiffies;
229		memcpy(fil->faddr, &src_addr, ETH_ALEN);
230		fil->vlan_id = vlan_id;
231		spin_lock(&adapter->rx_mac_learn_lock);
232		hlist_add_head(&(fil->fnode), head);
233		adapter->rx_fhash.fnum++;
234		spin_unlock(&adapter->rx_mac_learn_lock);
235	} else {
236		head = &adapter->fhash.fhead[hindex];
237
238		spin_lock(&adapter->mac_learn_lock);
239
240		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
241		if (tmp_fil) {
242			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
243			ret = qlcnic_sre_macaddr_change(adapter,
244							(u8 *)&src_addr,
245							vlan_id, op);
246			if (!ret) {
247				hlist_del(&tmp_fil->fnode);
248				adapter->fhash.fnum--;
249			}
250
251			spin_unlock(&adapter->mac_learn_lock);
252
253			return;
254		}
255
256		spin_unlock(&adapter->mac_learn_lock);
257
258		head = &adapter->rx_fhash.fhead[hindex];
259
260		spin_lock(&adapter->rx_mac_learn_lock);
261
262		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
263		if (tmp_fil)
264			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
265						  vlan_id);
266
267		spin_unlock(&adapter->rx_mac_learn_lock);
268	}
269}
270
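/* Post a MAC-learn request directly on the Tx ring so that the firmware
 * adds the given address (and optional VLAN) to its filter table.
 */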
271void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
272			       u16 vlan_id)
273{
274	struct cmd_desc_type0 *hwdesc;
275	struct qlcnic_nic_req *req;
276	struct qlcnic_mac_req *mac_req;
277	struct qlcnic_vlan_req *vlan_req;
278	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
279	u32 producer;
280	u64 word;
281
282	producer = tx_ring->producer;
283	hwdesc = &tx_ring->desc_head[tx_ring->producer];
284
285	req = (struct qlcnic_nic_req *)hwdesc;
286	memset(req, 0, sizeof(struct qlcnic_nic_req));
287	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
288
289	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
290	req->req_hdr = cpu_to_le64(word);
291
292	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
293	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
294	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
295
296	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
297	vlan_req->vlan_id = cpu_to_le16(vlan_id);
298
299	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
300	smp_mb();
301}
302
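/* Transmit-path source MAC learning: refresh an existing filter entry or,
 * if there is still room in the filter table, program a new one.
 */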
303static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
304			       struct cmd_desc_type0 *first_desc,
305			       struct sk_buff *skb)
306{
307	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
308	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
309	u16 protocol = ntohs(skb->protocol);
310	struct qlcnic_filter *fil, *tmp_fil;
311	struct hlist_head *head;
312	struct hlist_node *n;
313	u64 src_addr = 0;
314	u16 vlan_id = 0;
315	u8 hindex, hval;
316
317	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
318		return;
319
320	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
321		if (protocol == ETH_P_8021Q) {
322			vh = (struct vlan_ethhdr *)skb->data;
323			vlan_id = ntohs(vh->h_vlan_TCI);
324		} else if (skb_vlan_tag_present(skb)) {
325			vlan_id = skb_vlan_tag_get(skb);
326		}
327	}
328
329	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
330	hval = qlcnic_mac_hash(src_addr, vlan_id);
331	hindex = hval & (adapter->fhash.fbucket_size - 1);
332	head = &(adapter->fhash.fhead[hindex]);
333
334	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
335		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
336		    tmp_fil->vlan_id == vlan_id) {
337			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
338				qlcnic_change_filter(adapter, &src_addr,
339						     vlan_id);
340			tmp_fil->ftime = jiffies;
341			return;
342		}
343	}
344
345	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
346		adapter->stats.mac_filter_limit_overrun++;
347		return;
348	}
349
350	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
351	if (!fil)
352		return;
353
354	qlcnic_change_filter(adapter, &src_addr, vlan_id);
355	fil->ftime = jiffies;
356	fil->vlan_id = vlan_id;
357	memcpy(fil->faddr, &src_addr, ETH_ALEN);
358	spin_lock(&adapter->mac_learn_lock);
359	hlist_add_head(&(fil->fnode), head);
360	adapter->fhash.fnum++;
361	spin_unlock(&adapter->mac_learn_lock);
362}
363
364#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
365#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
366#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
367#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
368#define QLCNIC_ENCAP_DO_L3_CSUM		BIT_4
369#define QLCNIC_ENCAP_DO_L4_CSUM		BIT_5
370
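/* Set up the Tx descriptor for a VXLAN-encapsulated frame: inner/outer
 * header offsets, checksum offload bits and, for GSO, a copy of the
 * complete header template in the descriptor ring.
 */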
371static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
372			       struct cmd_desc_type0 *first_desc,
373			       struct sk_buff *skb,
374			       struct qlcnic_host_tx_ring *tx_ring)
375{
376	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
377	int copied, copy_len, descr_size;
378	u32 producer = tx_ring->producer;
379	struct cmd_desc_type0 *hwdesc;
380	u16 flags = 0, encap_descr = 0;
381
382	opcode = QLCNIC_TX_ETHER_PKT;
383	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
384
385	if (skb_is_gso(skb)) {
386		inner_hdr_len = skb_inner_transport_header(skb) +
387				inner_tcp_hdrlen(skb) -
388				skb_inner_mac_header(skb);
389
390		/* VXLAN header size = 8 */
391		outer_hdr_len = skb_transport_offset(skb) + 8 +
392				sizeof(struct udphdr);
393		first_desc->outer_hdr_length = outer_hdr_len;
394		total_hdr_len = inner_hdr_len + outer_hdr_len;
395		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
396			       QLCNIC_ENCAP_DO_L4_CSUM;
397		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
398		first_desc->hdr_length = inner_hdr_len;
399
		/* Copy the inner and outer headers into the Tx descriptor(s).
		 * If total_hdr_len exceeds the size of one cmd_desc_type0,
		 * spread the copy over multiple descriptors.
		 */
404		copied = 0;
405		descr_size = (int)sizeof(struct cmd_desc_type0);
406		while (copied < total_hdr_len) {
407			copy_len = min(descr_size, (total_hdr_len - copied));
408			hwdesc = &tx_ring->desc_head[producer];
409			tx_ring->cmd_buf_arr[producer].skb = NULL;
410			skb_copy_from_linear_data_offset(skb, copied,
411							 (char *)hwdesc,
412							 copy_len);
413			copied += copy_len;
414			producer = get_next_index(producer, tx_ring->num_desc);
415		}
416
417		tx_ring->producer = producer;
418
419		/* Make sure updated tx_ring->producer is visible
420		 * for qlcnic_tx_avail()
421		 */
422		smp_mb();
423		adapter->stats.encap_lso_frames++;
424
425		opcode = QLCNIC_TX_ENCAP_LSO;
426	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
427		if (inner_ip_hdr(skb)->version == 6) {
428			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
429				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
430		} else {
431			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
432				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
433		}
434
435		adapter->stats.encap_tx_csummed++;
436		opcode = QLCNIC_TX_ENCAP_PKT;
437	}
438
439	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
440	if (ip_hdr(skb)->version == 6)
441		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
442
	/* outer IP header's size in 32-bit words */
444	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
445
446	/* outer IP header offset */
447	encap_descr |= skb_network_offset(skb) << 10;
448	first_desc->encap_descr = cpu_to_le16(encap_descr);
449
450	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
451				     skb->data;
452	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
453
454	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
455
456	return 0;
457}
458
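/* Fill the protocol-specific fields of the first Tx descriptor: VLAN tag,
 * opcode, header offsets and, for LSO, a copy of the MAC/IP/TCP headers
 * in the descriptor ring.
 */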
459static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
460			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
461			 struct qlcnic_host_tx_ring *tx_ring)
462{
463	u8 l4proto, opcode = 0, hdr_len = 0;
464	u16 flags = 0, vlan_tci = 0;
465	int copied, offset, copy_len, size;
466	struct cmd_desc_type0 *hwdesc;
467	struct vlan_ethhdr *vh;
468	u16 protocol = ntohs(skb->protocol);
469	u32 producer = tx_ring->producer;
470
471	if (protocol == ETH_P_8021Q) {
472		vh = (struct vlan_ethhdr *)skb->data;
473		flags = QLCNIC_FLAGS_VLAN_TAGGED;
474		vlan_tci = ntohs(vh->h_vlan_TCI);
475		protocol = ntohs(vh->h_vlan_encapsulated_proto);
476	} else if (skb_vlan_tag_present(skb)) {
477		flags = QLCNIC_FLAGS_VLAN_OOB;
478		vlan_tci = skb_vlan_tag_get(skb);
479	}
480	if (unlikely(adapter->tx_pvid)) {
481		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
482			return -EIO;
483		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
484			goto set_flags;
485
486		flags = QLCNIC_FLAGS_VLAN_OOB;
487		vlan_tci = adapter->tx_pvid;
488	}
489set_flags:
490	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
491	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
492
493	if (*(skb->data) & BIT_0) {
494		flags |= BIT_0;
495		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
496	}
497	opcode = QLCNIC_TX_ETHER_PKT;
498	if (skb_is_gso(skb)) {
499		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
500		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
501		first_desc->hdr_length = hdr_len;
502		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
503						    QLCNIC_TX_TCP_LSO;
504
		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
507		copied = 0;
508		offset = 2;
509
510		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
511			first_desc->hdr_length += VLAN_HLEN;
512			first_desc->tcp_hdr_offset = VLAN_HLEN;
513			first_desc->ip_hdr_offset = VLAN_HLEN;
514
515			/* Only in case of TSO on vlan device */
516			flags |= QLCNIC_FLAGS_VLAN_TAGGED;
517
518			/* Create a TSO vlan header template for firmware */
519			hwdesc = &tx_ring->desc_head[producer];
520			tx_ring->cmd_buf_arr[producer].skb = NULL;
521
522			copy_len = min((int)sizeof(struct cmd_desc_type0) -
523				       offset, hdr_len + VLAN_HLEN);
524
525			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
526			skb_copy_from_linear_data(skb, vh, 12);
527			vh->h_vlan_proto = htons(ETH_P_8021Q);
528			vh->h_vlan_TCI = htons(vlan_tci);
529
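			/* The MAC addresses (12 bytes) were copied above and
			 * the 4-byte VLAN tag inserted; copy the remaining
			 * headers after it.
			 */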
530			skb_copy_from_linear_data_offset(skb, 12,
531							 (char *)vh + 16,
532							 copy_len - 16);
533			copied = copy_len - VLAN_HLEN;
534			offset = 0;
535			producer = get_next_index(producer, tx_ring->num_desc);
536		}
537
538		while (copied < hdr_len) {
539			size = (int)sizeof(struct cmd_desc_type0) - offset;
540			copy_len = min(size, (hdr_len - copied));
541			hwdesc = &tx_ring->desc_head[producer];
542			tx_ring->cmd_buf_arr[producer].skb = NULL;
543			skb_copy_from_linear_data_offset(skb, copied,
544							 (char *)hwdesc +
545							 offset, copy_len);
546			copied += copy_len;
547			offset = 0;
548			producer = get_next_index(producer, tx_ring->num_desc);
549		}
550
551		tx_ring->producer = producer;
552		smp_mb();
553		adapter->stats.lso_frames++;
554
555	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
556		if (protocol == ETH_P_IP) {
557			l4proto = ip_hdr(skb)->protocol;
558
559			if (l4proto == IPPROTO_TCP)
560				opcode = QLCNIC_TX_TCP_PKT;
561			else if (l4proto == IPPROTO_UDP)
562				opcode = QLCNIC_TX_UDP_PKT;
563		} else if (protocol == ETH_P_IPV6) {
564			l4proto = ipv6_hdr(skb)->nexthdr;
565
566			if (l4proto == IPPROTO_TCP)
567				opcode = QLCNIC_TX_TCPV6_PKT;
568			else if (l4proto == IPPROTO_UDP)
569				opcode = QLCNIC_TX_UDPV6_PKT;
570		}
571	}
572	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
573	first_desc->ip_hdr_offset += skb_network_offset(skb);
574	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
575
576	return 0;
577}
578
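/* DMA-map the linear data and every page fragment of the skb; on failure
 * any mappings already created are unwound and -ENOMEM is returned.
 */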
579static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
580			     struct qlcnic_cmd_buffer *pbuf)
581{
582	struct qlcnic_skb_frag *nf;
583	struct skb_frag_struct *frag;
584	int i, nr_frags;
585	dma_addr_t map;
586
587	nr_frags = skb_shinfo(skb)->nr_frags;
588	nf = &pbuf->frag_array[0];
589
590	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
591			     PCI_DMA_TODEVICE);
592	if (pci_dma_mapping_error(pdev, map))
593		goto out_err;
594
595	nf->dma = map;
596	nf->length = skb_headlen(skb);
597
598	for (i = 0; i < nr_frags; i++) {
599		frag = &skb_shinfo(skb)->frags[i];
600		nf = &pbuf->frag_array[i+1];
601		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
602				       DMA_TO_DEVICE);
603		if (dma_mapping_error(&pdev->dev, map))
604			goto unwind;
605
606		nf->dma = map;
607		nf->length = skb_frag_size(frag);
608	}
609
610	return 0;
611
612unwind:
613	while (--i >= 0) {
614		nf = &pbuf->frag_array[i+1];
615		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
616	}
617
618	nf = &pbuf->frag_array[0];
619	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
620
621out_err:
622	return -ENOMEM;
623}
624
625static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
626				 struct qlcnic_cmd_buffer *pbuf)
627{
628	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
629	int i, nr_frags = skb_shinfo(skb)->nr_frags;
630
631	for (i = 0; i < nr_frags; i++) {
632		nf = &pbuf->frag_array[i+1];
633		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
634	}
635
636	nf = &pbuf->frag_array[0];
637	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
638	pbuf->skb = NULL;
639}
640
641static inline void qlcnic_clear_cmddesc(u64 *desc)
642{
643	desc[0] = 0ULL;
644	desc[2] = 0ULL;
645	desc[7] = 0ULL;
646}
647
648netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
649{
650	struct qlcnic_adapter *adapter = netdev_priv(netdev);
651	struct qlcnic_host_tx_ring *tx_ring;
652	struct qlcnic_cmd_buffer *pbuf;
653	struct qlcnic_skb_frag *buffrag;
654	struct cmd_desc_type0 *hwdesc, *first_desc;
655	struct pci_dev *pdev;
656	struct ethhdr *phdr;
657	int i, k, frag_count, delta = 0;
658	u32 producer, num_txd;
659	u16 protocol;
660	bool l4_is_udp = false;
661
662	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
663		netif_tx_stop_all_queues(netdev);
664		return NETDEV_TX_BUSY;
665	}
666
667	if (adapter->flags & QLCNIC_MACSPOOF) {
668		phdr = (struct ethhdr *)skb->data;
669		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
670			goto drop_packet;
671	}
672
673	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
674	num_txd = tx_ring->num_desc;
675
676	frag_count = skb_shinfo(skb)->nr_frags + 1;
677
678	/* 14 frags supported for normal packet and
679	 * 32 frags supported for TSO packet
680	 */
681	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
682		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
683			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
684
685		if (!__pskb_pull_tail(skb, delta))
686			goto drop_packet;
687
688		frag_count = 1 + skb_shinfo(skb)->nr_frags;
689	}
690
691	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
692		netif_tx_stop_queue(tx_ring->txq);
693		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
694			netif_tx_start_queue(tx_ring->txq);
695		} else {
696			tx_ring->tx_stats.xmit_off++;
697			return NETDEV_TX_BUSY;
698		}
699	}
700
701	producer = tx_ring->producer;
702	pbuf = &tx_ring->cmd_buf_arr[producer];
703	pdev = adapter->pdev;
704	first_desc = &tx_ring->desc_head[producer];
705	hwdesc = &tx_ring->desc_head[producer];
706	qlcnic_clear_cmddesc((u64 *)hwdesc);
707
708	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
709		adapter->stats.tx_dma_map_error++;
710		goto drop_packet;
711	}
712
713	pbuf->skb = skb;
714	pbuf->frag_count = frag_count;
715
716	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
717	qlcnic_set_tx_port(first_desc, adapter->portnum);
718
719	for (i = 0; i < frag_count; i++) {
720		k = i % 4;
721
722		if ((k == 0) && (i > 0)) {
723			/* move to next desc.*/
724			producer = get_next_index(producer, num_txd);
725			hwdesc = &tx_ring->desc_head[producer];
726			qlcnic_clear_cmddesc((u64 *)hwdesc);
727			tx_ring->cmd_buf_arr[producer].skb = NULL;
728		}
729
730		buffrag = &pbuf->frag_array[i];
731		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
732		switch (k) {
733		case 0:
734			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
735			break;
736		case 1:
737			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
738			break;
739		case 2:
740			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
741			break;
742		case 3:
743			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
744			break;
745		}
746	}
747
748	tx_ring->producer = get_next_index(producer, num_txd);
749	smp_mb();
750
751	protocol = ntohs(skb->protocol);
752	if (protocol == ETH_P_IP)
753		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
754	else if (protocol == ETH_P_IPV6)
755		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
756
757	/* Check if it is a VXLAN packet */
758	if (!skb->encapsulation || !l4_is_udp ||
759	    !qlcnic_encap_tx_offload(adapter)) {
760		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
761					   tx_ring)))
762			goto unwind_buff;
763	} else {
764		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
765						 skb, tx_ring)))
766			goto unwind_buff;
767	}
768
769	if (adapter->drv_mac_learn)
770		qlcnic_send_filter(adapter, first_desc, skb);
771
772	tx_ring->tx_stats.tx_bytes += skb->len;
773	tx_ring->tx_stats.xmit_called++;
774
775	qlcnic_update_cmd_producer(tx_ring);
776
777	return NETDEV_TX_OK;
778
779unwind_buff:
780	qlcnic_unmap_buffers(pdev, skb, pbuf);
781drop_packet:
782	adapter->stats.txdropped++;
783	dev_kfree_skb_any(skb);
784	return NETDEV_TX_OK;
785}
786
787void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
788{
789	struct net_device *netdev = adapter->netdev;
790
791	if (adapter->ahw->linkup && !linkup) {
792		netdev_info(netdev, "NIC Link is down\n");
793		adapter->ahw->linkup = 0;
794		netif_carrier_off(netdev);
795	} else if (!adapter->ahw->linkup && linkup) {
796		adapter->ahw->linkup = 1;
797
798		/* Do not advertise Link up to the stack if device
799		 * is in loopback mode
800		 */
801		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
802			netdev_info(netdev, "NIC Link is up for loopback test\n");
803			return;
804		}
805
806		netdev_info(netdev, "NIC Link is up\n");
807		netif_carrier_on(netdev);
808	}
809}
810
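/* Allocate and DMA-map a receive skb for one rds ring buffer */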
811static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
812			       struct qlcnic_host_rds_ring *rds_ring,
813			       struct qlcnic_rx_buffer *buffer)
814{
815	struct sk_buff *skb;
816	dma_addr_t dma;
817	struct pci_dev *pdev = adapter->pdev;
818
819	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
820	if (!skb) {
821		adapter->stats.skb_alloc_failure++;
822		return -ENOMEM;
823	}
824
825	skb_reserve(skb, NET_IP_ALIGN);
826	dma = pci_map_single(pdev, skb->data,
827			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);
828
829	if (pci_dma_mapping_error(pdev, dma)) {
830		adapter->stats.rx_dma_map_error++;
831		dev_kfree_skb_any(skb);
832		return -ENOMEM;
833	}
834
835	buffer->skb = skb;
836	buffer->dma = dma;
837
838	return 0;
839}
840
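/* Refill the receive ring from its free list and advance the producer;
 * gives up immediately if the ring lock is already held elsewhere.
 */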
841static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
842					struct qlcnic_host_rds_ring *rds_ring,
843					u8 ring_id)
844{
845	struct rcv_desc *pdesc;
846	struct qlcnic_rx_buffer *buffer;
847	int  count = 0;
848	uint32_t producer, handle;
849	struct list_head *head;
850
851	if (!spin_trylock(&rds_ring->lock))
852		return;
853
854	producer = rds_ring->producer;
855	head = &rds_ring->free_list;
856	while (!list_empty(head)) {
857		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
858
859		if (!buffer->skb) {
860			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
861				break;
862		}
863		count++;
864		list_del(&buffer->list);
865
		/* make a receive descriptor */
867		pdesc = &rds_ring->desc_head[producer];
868		handle = qlcnic_get_ref_handle(adapter,
869					       buffer->ref_handle, ring_id);
870		pdesc->reference_handle = cpu_to_le16(handle);
871		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
872		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
873		producer = get_next_index(producer, rds_ring->num_desc);
874	}
875	if (count) {
876		rds_ring->producer = producer;
877		writel((producer - 1) & (rds_ring->num_desc - 1),
878		       rds_ring->crb_rcv_producer);
879	}
880	spin_unlock(&rds_ring->lock);
881}
882
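/* Reclaim Tx buffers consumed by the firmware, unmap their DMA mappings
 * and wake the Tx queue once enough descriptors are free again.
 */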
883static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
884				   struct qlcnic_host_tx_ring *tx_ring,
885				   int budget)
886{
887	u32 sw_consumer, hw_consumer;
888	int i, done, count = 0;
889	struct qlcnic_cmd_buffer *buffer;
890	struct pci_dev *pdev = adapter->pdev;
891	struct net_device *netdev = adapter->netdev;
892	struct qlcnic_skb_frag *frag;
893
894	if (!spin_trylock(&tx_ring->tx_clean_lock))
895		return 1;
896
897	sw_consumer = tx_ring->sw_consumer;
898	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
899
900	while (sw_consumer != hw_consumer) {
901		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
902		if (buffer->skb) {
903			frag = &buffer->frag_array[0];
904			pci_unmap_single(pdev, frag->dma, frag->length,
905					 PCI_DMA_TODEVICE);
906			frag->dma = 0ULL;
907			for (i = 1; i < buffer->frag_count; i++) {
908				frag++;
909				pci_unmap_page(pdev, frag->dma, frag->length,
910					       PCI_DMA_TODEVICE);
911				frag->dma = 0ULL;
912			}
913			tx_ring->tx_stats.xmit_finished++;
914			dev_kfree_skb_any(buffer->skb);
915			buffer->skb = NULL;
916		}
917
918		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
919		if (++count >= budget)
920			break;
921	}
922
923	tx_ring->sw_consumer = sw_consumer;
924
925	if (count && netif_running(netdev)) {
926		smp_mb();
927		if (netif_tx_queue_stopped(tx_ring->txq) &&
928		    netif_carrier_ok(netdev)) {
929			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
930				netif_tx_wake_queue(tx_ring->txq);
931				tx_ring->tx_stats.xmit_on++;
932			}
933		}
934		adapter->tx_timeo_cnt = 0;
935	}
936	/*
	 * If everything is freed up to the consumer then check if the ring is
	 * full. If the ring is full then check if more needs to be freed and
	 * schedule the callback again.
940	 *
941	 * This happens when there are 2 CPUs. One could be freeing and the
942	 * other filling it. If the ring is full when we get out of here and
943	 * the card has already interrupted the host then the host can miss the
944	 * interrupt.
945	 *
946	 * There is still a possible race condition and the host could miss an
947	 * interrupt. The card has to take care of this.
948	 */
949	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
950	done = (sw_consumer == hw_consumer);
951
952	spin_unlock(&tx_ring->tx_clean_lock);
953
954	return done;
955}
956
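/* Combined NAPI handler: clean the paired Tx ring, then process receive
 * completions; interrupts are re-enabled only when both rings are done.
 */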
957static int qlcnic_poll(struct napi_struct *napi, int budget)
958{
959	int tx_complete, work_done;
960	struct qlcnic_host_sds_ring *sds_ring;
961	struct qlcnic_adapter *adapter;
962	struct qlcnic_host_tx_ring *tx_ring;
963
964	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
965	adapter = sds_ring->adapter;
966	tx_ring = sds_ring->tx_ring;
967
968	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
969					      budget);
970	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
971
972	/* Check if we need a repoll */
973	if (!tx_complete)
974		work_done = budget;
975
976	if (work_done < budget) {
977		napi_complete(&sds_ring->napi);
978		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
979			qlcnic_enable_sds_intr(adapter, sds_ring);
980			qlcnic_enable_tx_intr(adapter, tx_ring);
981		}
982	}
983
984	return work_done;
985}
986
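/* NAPI handler for a Tx-only interrupt vector (multi Tx queue mode) */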
987static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
988{
989	struct qlcnic_host_tx_ring *tx_ring;
990	struct qlcnic_adapter *adapter;
991	int work_done;
992
993	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
994	adapter = tx_ring->adapter;
995
996	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
997	if (work_done) {
998		napi_complete(&tx_ring->napi);
999		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1000			qlcnic_enable_tx_intr(adapter, tx_ring);
1001	} else {
		/* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
1003		work_done = budget;
1004	}
1005
1006	return work_done;
1007}
1008
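/* NAPI handler for an Rx-only interrupt vector */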
1009static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
1010{
1011	struct qlcnic_host_sds_ring *sds_ring;
1012	struct qlcnic_adapter *adapter;
1013	int work_done;
1014
1015	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1016	adapter = sds_ring->adapter;
1017
1018	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1019
1020	if (work_done < budget) {
1021		napi_complete(&sds_ring->napi);
1022		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1023			qlcnic_enable_sds_intr(adapter, sds_ring);
1024	}
1025
1026	return work_done;
1027}
1028
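/* Decode an asynchronous link event from the firmware and update the
 * cached link speed, duplex, autoneg, module type and carrier state.
 */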
1029static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1030				    struct qlcnic_fw_msg *msg)
1031{
1032	u32 cable_OUI;
1033	u16 cable_len, link_speed;
1034	u8  link_status, module, duplex, autoneg, lb_status = 0;
1035	struct net_device *netdev = adapter->netdev;
1036
1037	adapter->ahw->has_link_events = 1;
1038
1039	cable_OUI = msg->body[1] & 0xffffffff;
1040	cable_len = (msg->body[1] >> 32) & 0xffff;
1041	link_speed = (msg->body[1] >> 48) & 0xffff;
1042
1043	link_status = msg->body[2] & 0xff;
1044	duplex = (msg->body[2] >> 16) & 0xff;
1045	autoneg = (msg->body[2] >> 24) & 0xff;
1046	lb_status = (msg->body[2] >> 32) & 0x3;
1047
1048	module = (msg->body[2] >> 8) & 0xff;
1049	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
1050		dev_info(&netdev->dev,
1051			 "unsupported cable: OUI 0x%x, length %d\n",
1052			 cable_OUI, cable_len);
1053	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
1054		dev_info(&netdev->dev, "unsupported cable length %d\n",
1055			 cable_len);
1056
1057	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
1058	    lb_status == QLCNIC_ELB_MODE))
1059		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
1060
1061	qlcnic_advert_link_change(adapter, link_status);
1062
1063	if (duplex == LINKEVENT_FULL_DUPLEX)
1064		adapter->ahw->link_duplex = DUPLEX_FULL;
1065	else
1066		adapter->ahw->link_duplex = DUPLEX_HALF;
1067
1068	adapter->ahw->module_type = module;
1069	adapter->ahw->link_autoneg = autoneg;
1070
1071	if (link_status) {
1072		adapter->ahw->link_speed = link_speed;
1073	} else {
1074		adapter->ahw->link_speed = SPEED_UNKNOWN;
1075		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
1076	}
1077}
1078
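/* Gather a (possibly multi-descriptor) firmware message from the status
 * ring and dispatch it by opcode.
 */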
1079static void qlcnic_handle_fw_message(int desc_cnt, int index,
1080				     struct qlcnic_host_sds_ring *sds_ring)
1081{
1082	struct qlcnic_fw_msg msg;
1083	struct status_desc *desc;
1084	struct qlcnic_adapter *adapter;
1085	struct device *dev;
1086	int i = 0, opcode, ret;
1087
1088	while (desc_cnt > 0 && i < 8) {
1089		desc = &sds_ring->desc_head[index];
1090		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1091		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1092
1093		index = get_next_index(index, sds_ring->num_desc);
1094		desc_cnt--;
1095	}
1096
1097	adapter = sds_ring->adapter;
1098	dev = &adapter->pdev->dev;
1099	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
1100
1101	switch (opcode) {
1102	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1103		qlcnic_handle_linkevent(adapter, &msg);
1104		break;
1105	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
1106		ret = (u32)(msg.body[1]);
1107		switch (ret) {
1108		case 0:
1109			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
1110			break;
1111		case 1:
1112			dev_info(dev, "loopback already in progress\n");
1113			adapter->ahw->diag_cnt = -EINPROGRESS;
1114			break;
1115		case 2:
1116			dev_info(dev, "loopback cable is not connected\n");
1117			adapter->ahw->diag_cnt = -ENODEV;
1118			break;
1119		default:
1120			dev_info(dev,
1121				 "loopback configure request failed, err %x\n",
1122				 ret);
1123			adapter->ahw->diag_cnt = -EIO;
1124			break;
1125		}
1126		break;
1127	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
1128		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
1129		break;
1130	default:
1131		break;
1132	}
1133}
1134
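/* Detach the skb from an Rx buffer, unmap its DMA mapping and set the
 * checksum state from the hardware status.
 */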
1135static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1136					    struct qlcnic_host_rds_ring *ring,
1137					    u16 index, u16 cksum)
1138{
1139	struct qlcnic_rx_buffer *buffer;
1140	struct sk_buff *skb;
1141
1142	buffer = &ring->rx_buf_arr[index];
1143	if (unlikely(buffer->skb == NULL)) {
1144		WARN_ON(1);
1145		return NULL;
1146	}
1147
1148	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
1149			 PCI_DMA_FROMDEVICE);
1150
1151	skb = buffer->skb;
1152	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
1153		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
1154		adapter->stats.csummed++;
1155		skb->ip_summed = CHECKSUM_UNNECESSARY;
1156	} else {
1157		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;
1162
1163	return skb;
1164}
1165
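/* Strip an in-band VLAN header if present and decide how the tag should
 * be treated with respect to the configured port VLAN (PVID).
 */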
1166static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
1167					  struct sk_buff *skb, u16 *vlan_tag)
1168{
1169	struct ethhdr *eth_hdr;
1170
1171	if (!__vlan_get_tag(skb, vlan_tag)) {
1172		eth_hdr = (struct ethhdr *)skb->data;
1173		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1174		skb_pull(skb, VLAN_HLEN);
1175	}
1176	if (!adapter->rx_pvid)
1177		return 0;
1178
1179	if (*vlan_tag == adapter->rx_pvid) {
1180		/* Outer vlan tag. Packet should follow non-vlan path */
1181		*vlan_tag = 0xffff;
1182		return 0;
1183	}
1184	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1185		return 0;
1186
1187	return -EINVAL;
1188}
1189
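/* Handle a normal receive completion: detach the skb from the rds ring,
 * apply VLAN handling and pass it to the stack via GRO.
 */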
1190static struct qlcnic_rx_buffer *
1191qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1192		   struct qlcnic_host_sds_ring *sds_ring, int ring,
1193		   u64 sts_data0)
1194{
1195	struct net_device *netdev = adapter->netdev;
1196	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1197	struct qlcnic_rx_buffer *buffer;
1198	struct sk_buff *skb;
1199	struct qlcnic_host_rds_ring *rds_ring;
1200	int index, length, cksum, pkt_offset, is_lb_pkt;
1201	u16 vid = 0xffff, t_vid;
1202
1203	if (unlikely(ring >= adapter->max_rds_rings))
1204		return NULL;
1205
1206	rds_ring = &recv_ctx->rds_rings[ring];
1207
1208	index = qlcnic_get_sts_refhandle(sts_data0);
1209	if (unlikely(index >= rds_ring->num_desc))
1210		return NULL;
1211
1212	buffer = &rds_ring->rx_buf_arr[index];
1213	length = qlcnic_get_sts_totallength(sts_data0);
1214	cksum  = qlcnic_get_sts_status(sts_data0);
1215	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1216
1217	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1218	if (!skb)
1219		return buffer;
1220
1221	if (adapter->rx_mac_learn) {
1222		t_vid = 0;
1223		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1224		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1225	}
1226
1227	if (length > rds_ring->skb_size)
1228		skb_put(skb, rds_ring->skb_size);
1229	else
1230		skb_put(skb, length);
1231
1232	if (pkt_offset)
1233		skb_pull(skb, pkt_offset);
1234
1235	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1236		adapter->stats.rxdropped++;
1237		dev_kfree_skb(skb);
1238		return buffer;
1239	}
1240
1241	skb->protocol = eth_type_trans(skb, netdev);
1242
1243	if (vid != 0xffff)
1244		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1245
1246	napi_gro_receive(&sds_ring->napi, skb);
1247
1248	adapter->stats.rx_pkts++;
1249	adapter->stats.rxbytes += length;
1250
1251	return buffer;
1252}
1253
1254#define QLC_TCP_HDR_SIZE            20
1255#define QLC_TCP_TS_OPTION_SIZE      12
1256#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1257
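/* Handle an LRO completion: the firmware has aggregated several TCP
 * segments, so the IP and TCP headers are rewritten to match the
 * aggregated length before the skb is passed up.
 */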
1258static struct qlcnic_rx_buffer *
1259qlcnic_process_lro(struct qlcnic_adapter *adapter,
1260		   int ring, u64 sts_data0, u64 sts_data1)
1261{
1262	struct net_device *netdev = adapter->netdev;
1263	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1264	struct qlcnic_rx_buffer *buffer;
1265	struct sk_buff *skb;
1266	struct qlcnic_host_rds_ring *rds_ring;
1267	struct iphdr *iph;
1268	struct ipv6hdr *ipv6h;
1269	struct tcphdr *th;
1270	bool push, timestamp;
1271	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
1272	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
1273	u32 seq_number;
1274
1275	if (unlikely(ring >= adapter->max_rds_rings))
1276		return NULL;
1277
1278	rds_ring = &recv_ctx->rds_rings[ring];
1279
1280	index = qlcnic_get_lro_sts_refhandle(sts_data0);
1281	if (unlikely(index >= rds_ring->num_desc))
1282		return NULL;
1283
1284	buffer = &rds_ring->rx_buf_arr[index];
1285
1286	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1287	lro_length = qlcnic_get_lro_sts_length(sts_data0);
1288	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1289	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1290	push = qlcnic_get_lro_sts_push_flag(sts_data0);
1291	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1292
1293	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1294	if (!skb)
1295		return buffer;
1296
1297	if (adapter->rx_mac_learn) {
1298		t_vid = 0;
1299		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1300		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1301	}
1302
1303	if (timestamp)
1304		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1305	else
1306		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1307
1308	skb_put(skb, lro_length + data_offset);
1309	skb_pull(skb, l2_hdr_offset);
1310
1311	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1312		adapter->stats.rxdropped++;
1313		dev_kfree_skb(skb);
1314		return buffer;
1315	}
1316
1317	skb->protocol = eth_type_trans(skb, netdev);
1318
1319	if (ntohs(skb->protocol) == ETH_P_IPV6) {
1320		ipv6h = (struct ipv6hdr *)skb->data;
1321		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1322		length = (th->doff << 2) + lro_length;
1323		ipv6h->payload_len = htons(length);
1324	} else {
1325		iph = (struct iphdr *)skb->data;
1326		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1327		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1328		csum_replace2(&iph->check, iph->tot_len, htons(length));
1329		iph->tot_len = htons(length);
1330	}
1331
1332	th->psh = push;
1333	th->seq = htonl(seq_number);
1334	length = skb->len;
1335
1336	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
1337		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1338		if (skb->protocol == htons(ETH_P_IPV6))
1339			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1340		else
1341			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1342	}
1343
1344	if (vid != 0xffff)
1345		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1346	netif_receive_skb(skb);
1347
1348	adapter->stats.lro_pkts++;
1349	adapter->stats.lrobytes += length;
1350
1351	return buffer;
1352}
1353
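/* Main 82xx status ring loop: process the descriptors owned by the host,
 * dispatch them by opcode and replenish the receive rings afterwards.
 */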
1354static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1355{
1356	struct qlcnic_host_rds_ring *rds_ring;
1357	struct qlcnic_adapter *adapter = sds_ring->adapter;
1358	struct list_head *cur;
1359	struct status_desc *desc;
1360	struct qlcnic_rx_buffer *rxbuf;
1361	int opcode, desc_cnt, count = 0;
1362	u64 sts_data0, sts_data1;
1363	u8 ring;
1364	u32 consumer = sds_ring->consumer;
1365
1366	while (count < max) {
1367		desc = &sds_ring->desc_head[consumer];
1368		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1369
1370		if (!(sts_data0 & STATUS_OWNER_HOST))
1371			break;
1372
1373		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1374		opcode = qlcnic_get_sts_opcode(sts_data0);
1375		switch (opcode) {
1376		case QLCNIC_RXPKT_DESC:
1377		case QLCNIC_OLD_RXPKT_DESC:
1378		case QLCNIC_SYN_OFFLOAD:
1379			ring = qlcnic_get_sts_type(sts_data0);
1380			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
1381						   sts_data0);
1382			break;
1383		case QLCNIC_LRO_DESC:
1384			ring = qlcnic_get_lro_sts_type(sts_data0);
1385			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1386			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
1387						   sts_data1);
1388			break;
1389		case QLCNIC_RESPONSE_DESC:
1390			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
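			/* fall through */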
1391		default:
1392			goto skip;
1393		}
1394		WARN_ON(desc_cnt > 1);
1395
1396		if (likely(rxbuf))
1397			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1398		else
1399			adapter->stats.null_rxbuf++;
1400skip:
1401		for (; desc_cnt > 0; desc_cnt--) {
1402			desc = &sds_ring->desc_head[consumer];
1403			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
1404			consumer = get_next_index(consumer, sds_ring->num_desc);
1405		}
1406		count++;
1407	}
1408
1409	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1410		rds_ring = &adapter->recv_ctx->rds_rings[ring];
1411		if (!list_empty(&sds_ring->free_list[ring])) {
1412			list_for_each(cur, &sds_ring->free_list[ring]) {
1413				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
1414						   list);
1415				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1416			}
1417			spin_lock(&rds_ring->lock);
1418			list_splice_tail_init(&sds_ring->free_list[ring],
1419					      &rds_ring->free_list);
1420			spin_unlock(&rds_ring->lock);
1421		}
1422
1423		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
1424	}
1425
1426	if (count) {
1427		sds_ring->consumer = consumer;
1428		writel(consumer, sds_ring->crb_sts_consumer);
1429	}
1430
1431	return count;
1432}
1433
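/* Refill a receive ring from its free list and update the hardware
 * producer index.
 */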
1434void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1435			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
1436{
1437	struct rcv_desc *pdesc;
1438	struct qlcnic_rx_buffer *buffer;
1439	int count = 0;
1440	u32 producer, handle;
1441	struct list_head *head;
1442
1443	producer = rds_ring->producer;
1444	head = &rds_ring->free_list;
1445
1446	while (!list_empty(head)) {
1447
1448		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1449
1450		if (!buffer->skb) {
1451			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1452				break;
1453		}
1454
1455		count++;
1456		list_del(&buffer->list);
1457
		/* make a receive descriptor */
1459		pdesc = &rds_ring->desc_head[producer];
1460		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1461		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
1462					       ring_id);
1463		pdesc->reference_handle = cpu_to_le16(handle);
1464		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1465		producer = get_next_index(producer, rds_ring->num_desc);
1466	}
1467
1468	if (count) {
1469		rds_ring->producer = producer;
1470		writel((producer-1) & (rds_ring->num_desc-1),
1471		       rds_ring->crb_rcv_producer);
1472	}
1473}
1474
1475static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1476{
1477	if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
1478		char prefix[30];
1479
1480		scnprintf(prefix, sizeof(prefix), "%s: %s: ",
1481			  dev_name(&adapter->pdev->dev), __func__);
1482
1483		print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
1484				     skb->data, skb->len, true);
1485	}
1486}
1487
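/* Receive path used during loopback diagnostics: packets matching the
 * expected test pattern are counted instead of being passed up the stack.
 */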
1488static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
1489				    u64 sts_data0)
1490{
1491	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1492	struct sk_buff *skb;
1493	struct qlcnic_host_rds_ring *rds_ring;
1494	int index, length, cksum, pkt_offset;
1495
1496	if (unlikely(ring >= adapter->max_rds_rings))
1497		return;
1498
1499	rds_ring = &recv_ctx->rds_rings[ring];
1500
1501	index = qlcnic_get_sts_refhandle(sts_data0);
1502	length = qlcnic_get_sts_totallength(sts_data0);
1503	if (unlikely(index >= rds_ring->num_desc))
1504		return;
1505
1506	cksum  = qlcnic_get_sts_status(sts_data0);
1507	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1508
1509	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1510	if (!skb)
1511		return;
1512
1513	if (length > rds_ring->skb_size)
1514		skb_put(skb, rds_ring->skb_size);
1515	else
1516		skb_put(skb, length);
1517
1518	if (pkt_offset)
1519		skb_pull(skb, pkt_offset);
1520
1521	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1522		adapter->ahw->diag_cnt++;
1523	else
1524		dump_skb(skb, adapter);
1525
1526	dev_kfree_skb_any(skb);
1527	adapter->stats.rx_pkts++;
1528	adapter->stats.rxbytes += length;
1531}
1532
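/* Process a single status descriptor while the adapter is running a
 * diagnostic (interrupt or loopback) test.
 */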
1533void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1534{
1535	struct qlcnic_adapter *adapter = sds_ring->adapter;
1536	struct status_desc *desc;
1537	u64 sts_data0;
1538	int ring, opcode, desc_cnt;
1539
1540	u32 consumer = sds_ring->consumer;
1541
1542	desc = &sds_ring->desc_head[consumer];
1543	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1544
1545	if (!(sts_data0 & STATUS_OWNER_HOST))
1546		return;
1547
1548	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1549	opcode = qlcnic_get_sts_opcode(sts_data0);
1550	switch (opcode) {
1551	case QLCNIC_RESPONSE_DESC:
1552		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1553		break;
1554	default:
1555		ring = qlcnic_get_sts_type(sts_data0);
1556		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
1557		break;
1558	}
1559
1560	for (; desc_cnt > 0; desc_cnt--) {
1561		desc = &sds_ring->desc_head[consumer];
1562		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1563		consumer = get_next_index(consumer, sds_ring->num_desc);
1564	}
1565
1566	sds_ring->consumer = consumer;
1567	writel(consumer, sds_ring->crb_sts_consumer);
1568}
1569
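/* Allocate the SDS/Tx ring state and register the NAPI handlers; the
 * combined Tx+Rx handler is used on the last SDS ring when multi-Tx
 * queues are not in use.
 */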
1570int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1571			 struct net_device *netdev)
1572{
1573	int ring;
1574	struct qlcnic_host_sds_ring *sds_ring;
1575	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1576	struct qlcnic_host_tx_ring *tx_ring;
1577
1578	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
1579		return -ENOMEM;
1580
1581	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1582		sds_ring = &recv_ctx->sds_rings[ring];
1583		if (qlcnic_check_multi_tx(adapter) &&
1584		    !adapter->ahw->diag_test) {
1585			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1586				       NAPI_POLL_WEIGHT);
1587		} else {
1588			if (ring == (adapter->drv_sds_rings - 1))
1589				netif_napi_add(netdev, &sds_ring->napi,
1590					       qlcnic_poll,
1591					       NAPI_POLL_WEIGHT);
1592			else
1593				netif_napi_add(netdev, &sds_ring->napi,
1594					       qlcnic_rx_poll,
1595					       NAPI_POLL_WEIGHT);
1596		}
1597	}
1598
1599	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
1600		qlcnic_free_sds_rings(recv_ctx);
1601		return -ENOMEM;
1602	}
1603
1604	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1605		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1606			tx_ring = &adapter->tx_ring[ring];
1607			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
1608				       NAPI_POLL_WEIGHT);
1609		}
1610	}
1611
1612	return 0;
1613}
1614
1615void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1616{
1617	int ring;
1618	struct qlcnic_host_sds_ring *sds_ring;
1619	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1620	struct qlcnic_host_tx_ring *tx_ring;
1621
1622	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1623		sds_ring = &recv_ctx->sds_rings[ring];
1624		netif_napi_del(&sds_ring->napi);
1625	}
1626
1627	qlcnic_free_sds_rings(adapter->recv_ctx);
1628
1629	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1630		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1631			tx_ring = &adapter->tx_ring[ring];
1632			netif_napi_del(&tx_ring->napi);
1633		}
1634	}
1635
1636	qlcnic_free_tx_rings(adapter);
1637}
1638
1639void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1640{
1641	int ring;
1642	struct qlcnic_host_sds_ring *sds_ring;
1643	struct qlcnic_host_tx_ring *tx_ring;
1644	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1645
1646	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1647		return;
1648
1649	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1650		sds_ring = &recv_ctx->sds_rings[ring];
1651		napi_enable(&sds_ring->napi);
1652		qlcnic_enable_sds_intr(adapter, sds_ring);
1653	}
1654
1655	if (qlcnic_check_multi_tx(adapter) &&
1656	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
1657	    !adapter->ahw->diag_test) {
1658		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1659			tx_ring = &adapter->tx_ring[ring];
1660			napi_enable(&tx_ring->napi);
1661			qlcnic_enable_tx_intr(adapter, tx_ring);
1662		}
1663	}
1664}
1665
1666void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1667{
1668	int ring;
1669	struct qlcnic_host_sds_ring *sds_ring;
1670	struct qlcnic_host_tx_ring *tx_ring;
1671	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1672
1673	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1674		return;
1675
1676	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1677		sds_ring = &recv_ctx->sds_rings[ring];
1678		qlcnic_disable_sds_intr(adapter, sds_ring);
1679		napi_synchronize(&sds_ring->napi);
1680		napi_disable(&sds_ring->napi);
1681	}
1682
1683	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1684	    !adapter->ahw->diag_test &&
1685	    qlcnic_check_multi_tx(adapter)) {
1686		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1687			tx_ring = &adapter->tx_ring[ring];
1688			qlcnic_disable_tx_intr(adapter, tx_ring);
1689			napi_synchronize(&tx_ring->napi);
1690			napi_disable(&tx_ring->napi);
1691		}
1692	}
1693}
1694
1695#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
1696#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)
1697
1698static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
1699{
1700	if (lro_pkt)
1701		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
1702	else
1703		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
1704}
1705
1706#define QLCNIC_ENCAP_LENGTH_MASK	0x7f
1707
1708static inline u8 qlcnic_encap_length(u64 sts_data)
1709{
1710	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
1711}
1712
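/* 83xx variant of the normal receive completion handler */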
1713static struct qlcnic_rx_buffer *
1714qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1715			struct qlcnic_host_sds_ring *sds_ring,
1716			u8 ring, u64 sts_data[])
1717{
1718	struct net_device *netdev = adapter->netdev;
1719	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1720	struct qlcnic_rx_buffer *buffer;
1721	struct sk_buff *skb;
1722	struct qlcnic_host_rds_ring *rds_ring;
1723	int index, length, cksum, is_lb_pkt;
1724	u16 vid = 0xffff;
1725	int err;
1726
1727	if (unlikely(ring >= adapter->max_rds_rings))
1728		return NULL;
1729
1730	rds_ring = &recv_ctx->rds_rings[ring];
1731
1732	index = qlcnic_83xx_hndl(sts_data[0]);
1733	if (unlikely(index >= rds_ring->num_desc))
1734		return NULL;
1735
1736	buffer = &rds_ring->rx_buf_arr[index];
1737	length = qlcnic_83xx_pktln(sts_data[0]);
1738	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
1739	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1740	if (!skb)
1741		return buffer;
1742
1743	if (length > rds_ring->skb_size)
1744		skb_put(skb, rds_ring->skb_size);
1745	else
1746		skb_put(skb, length);
1747
1748	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
1749
1750	if (adapter->rx_mac_learn) {
1751		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
1752		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
1753	}
1754
1755	if (unlikely(err)) {
1756		adapter->stats.rxdropped++;
1757		dev_kfree_skb(skb);
1758		return buffer;
1759	}
1760
1761	skb->protocol = eth_type_trans(skb, netdev);
1762
1763	if (qlcnic_encap_length(sts_data[1]) &&
1764	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
1765		skb->csum_level = 1;
1766		adapter->stats.encap_rx_csummed++;
1767	}
1768
1769	if (vid != 0xffff)
1770		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1771
1772	napi_gro_receive(&sds_ring->napi, skb);
1773
1774	adapter->stats.rx_pkts++;
1775	adapter->stats.rxbytes += length;
1776
1777	return buffer;
1778}
1779
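/* 83xx variant of the LRO completion handler */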
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}

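/* Drain up to @max status descriptors from an 83xx SDS ring: dispatch each
 * completion to the regular or LRO receive handler, collect the consumed
 * buffers per RDS ring, re-allocate and re-post them, then publish the new
 * consumer index to the hardware. Returns the number of descriptors
 * processed so the NAPI poll routines can compare it against their budget.
 */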
static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

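/* NAPI poll used when Tx completions share the SDS interrupt, as registered
 * by qlcnic_83xx_napi_add() for the QLCNIC_TX_INTR_SHARED case (the SR-IOV
 * VF configuration the name refers to): service the single Tx ring and the
 * Rx ring from one handler, and report the full budget whenever Tx work is
 * left over so NAPI repolls instead of re-enabling the interrupt.
 */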
static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

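/* Combined NAPI poll used when MSI-X is not enabled: Tx and Rx completions
 * arrive on the same vector, so the command ring and the SDS ring are
 * processed together, with unfinished Tx work treated as a full budget to
 * force a repoll.
 */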
static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

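/* Tx-only NAPI poll used when Tx rings own dedicated MSI-X vectors. Note
 * that the incoming budget is overridden with QLCNIC_TX_POLL_BUDGET (128),
 * which may exceed the budget the NAPI core passed in; this follows the
 * driver's existing behaviour rather than strict NAPI budget accounting.
 */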
static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* need a repoll */
		work_done = budget;
	}

	return work_done;
}

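/* Rx-only NAPI poll for MSI-X mode with dedicated Tx vectors: only the SDS
 * ring is serviced here, while Tx completions are handled separately by
 * qlcnic_83xx_msix_tx_poll().
 */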
static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

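/* Enable NAPI on every SDS ring and, in MSI-X mode, unmask their
 * interrupts; Tx rings get the same treatment when they own dedicated
 * vectors. Does nothing until the adapter context is fully up.
 */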
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

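/* Reverse of qlcnic_83xx_napi_enable(): mask the ring interrupts, let any
 * in-flight polls finish via napi_synchronize(), then disable NAPI on every
 * SDS and Tx ring.
 */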
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

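/* Allocate SDS/Tx ring state and register the NAPI poll handler that
 * matches the interrupt mode: Rx-only polls for MSI-X with dedicated Tx
 * vectors, the shared-vector poll when Tx interrupts piggyback on the SDS
 * rings, and the combined poll otherwise. Returns 0 on success or -ENOMEM
 * if ring allocation fails.
 */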
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       NAPI_POLL_WEIGHT);
		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi,
				       qlcnic_83xx_msix_tx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

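/* Unregister every NAPI instance added by qlcnic_83xx_napi_add() and free
 * the SDS and Tx ring resources.
 */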
void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

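/* Diagnostic receive path used by the loopback self-test: the frame payload
 * is checked against the adapter MAC via qlcnic_check_loopback_buff(); the
 * diagnostic counter is bumped when that check returns zero, otherwise the
 * frame is dumped for debugging. The skb is always freed here rather than
 * being passed up the stack.
 */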
static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
					 int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

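/* Single-descriptor variant of the receive path used during interrupt and
 * loopback diagnostics: consume one status descriptor, feed it to
 * qlcnic_83xx_process_rcv_diag(), mark the descriptor firmware-owned again
 * (STATUS_OWNER_PHANTOM) and advance the consumer index.
 */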
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}