1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43 
44 #include "qlge.h"
45 
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48 
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53 
54 static const u32 default_msg =
55     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER |	*/
57     NETIF_MSG_IFDOWN |
58     NETIF_MSG_IFUP |
59     NETIF_MSG_RX_ERR |
60     NETIF_MSG_TX_ERR |
61 /*  NETIF_MSG_TX_QUEUED | */
62 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65 
66 static int debug = -1;	/* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69 
70 #define MSIX_IRQ 0
71 #define MSI_IRQ 1
72 #define LEG_IRQ 2
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 		"Option to enable MPI firmware dump. "
81 		"Default is OFF - Do Not allocate memory. ");
82 
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86 		"Option to allow force of firmware core dump. "
87 		"Default is OFF - Do not allow.");
88 
89 static const struct pci_device_id qlge_pci_tbl[] = {
90 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 	/* required last entry */
93 	{0,}
94 };
95 
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97 
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
102 
103 /* This hardware semaphore causes exclusive access to
104  * resources shared between the NIC driver, MPI firmware,
105  * FCOE firmware and the FC driver.
106  */
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108 {
109 	u32 sem_bits = 0;
110 
111 	switch (sem_mask) {
112 	case SEM_XGMAC0_MASK:
113 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 		break;
115 	case SEM_XGMAC1_MASK:
116 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 		break;
118 	case SEM_ICB_MASK:
119 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 		break;
121 	case SEM_MAC_ADDR_MASK:
122 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 		break;
124 	case SEM_FLASH_MASK:
125 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 		break;
127 	case SEM_PROBE_MASK:
128 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 		break;
130 	case SEM_RT_IDX_MASK:
131 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 		break;
133 	case SEM_PROC_REG_MASK:
134 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 		break;
136 	default:
137 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
138 		return -EINVAL;
139 	}
140 
141 	ql_write32(qdev, SEM, sem_bits | sem_mask);
142 	return !(ql_read32(qdev, SEM) & sem_bits);
143 }
144 
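/* Poll for the hardware semaphore.  Retries the trylock up to 30 times
 * with a 100us delay between attempts before giving up.
 */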
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 {
147 	unsigned int wait_count = 30;
148 	do {
149 		if (!ql_sem_trylock(qdev, sem_mask))
150 			return 0;
151 		udelay(100);
152 	} while (--wait_count);
153 	return -ETIMEDOUT;
154 }
155 
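/* Release the hardware semaphore and flush the write. */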
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 {
158 	ql_write32(qdev, SEM, sem_mask);
159 	ql_read32(qdev, SEM);	/* flush */
160 }
161 
162 /* This function waits for a specific bit to come ready
163  * in a given register.  It is used mostly by the initialize
164  * process, but is also used in kernel thread API such as
165  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166  */
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168 {
169 	u32 temp;
170 	int count = UDELAY_COUNT;
171 
172 	while (count) {
173 		temp = ql_read32(qdev, reg);
174 
175 		/* check for errors */
176 		if (temp & err_bit) {
177 			netif_alert(qdev, probe, qdev->ndev,
178 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
179 				    reg, temp);
180 			return -EIO;
181 		} else if (temp & bit)
182 			return 0;
183 		udelay(UDELAY_DELAY);
184 		count--;
185 	}
186 	netif_alert(qdev, probe, qdev->ndev,
187 		    "Timed out waiting for reg %x to come ready.\n", reg);
188 	return -ETIMEDOUT;
189 }
190 
191 /* The CFG register is used to download TX and RX control blocks
192  * to the chip. This function waits for an operation to complete.
193  */
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 {
196 	int count = UDELAY_COUNT;
197 	u32 temp;
198 
199 	while (count) {
200 		temp = ql_read32(qdev, CFG);
201 		if (temp & CFG_LE)
202 			return -EIO;
203 		if (!(temp & bit))
204 			return 0;
205 		udelay(UDELAY_DELAY);
206 		count--;
207 	}
208 	return -ETIMEDOUT;
209 }
210 
211 
212 /* Used to issue init control blocks to hw. Maps control block,
213  * sets address, triggers download, waits for completion.
214  */
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
216 		 u16 q_id)
217 {
218 	u64 map;
219 	int status = 0;
220 	int direction;
221 	u32 mask;
222 	u32 value;
223 
224 	direction =
225 	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
226 	    PCI_DMA_FROMDEVICE;
227 
228 	map = pci_map_single(qdev->pdev, ptr, size, direction);
229 	if (pci_dma_mapping_error(qdev->pdev, map)) {
230 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 		return -ENOMEM;
232 	}
233 
234 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 	if (status)
236 		goto lock_failed;
237 
238 	status = ql_wait_cfg(qdev, bit);
239 	if (status) {
240 		netif_err(qdev, ifup, qdev->ndev,
241 			  "Timed out waiting for CFG to come ready.\n");
242 		goto exit;
243 	}
244 
245 	ql_write32(qdev, ICB_L, (u32) map);
246 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 
248 	mask = CFG_Q_MASK | (bit << 16);
249 	value = bit | (q_id << CFG_Q_SHIFT);
250 	ql_write32(qdev, CFG, (mask | value));
251 
252 	/*
253 	 * Wait for the bit to clear after signaling hw.
254 	 */
255 	status = ql_wait_cfg(qdev, bit);
256 exit:
257 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
258 	pci_unmap_single(qdev->pdev, map, size, direction);
259 	return status;
260 }
261 
262 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 			u32 *value)
265 {
266 	u32 offset = 0;
267 	int status;
268 
269 	switch (type) {
270 	case MAC_ADDR_TYPE_MULTI_MAC:
271 	case MAC_ADDR_TYPE_CAM_MAC:
272 		{
273 			status =
274 			    ql_wait_reg_rdy(qdev,
275 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
276 			if (status)
277 				goto exit;
278 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 			status =
282 			    ql_wait_reg_rdy(qdev,
283 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
284 			if (status)
285 				goto exit;
286 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 			status =
288 			    ql_wait_reg_rdy(qdev,
289 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
290 			if (status)
291 				goto exit;
292 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 			status =
296 			    ql_wait_reg_rdy(qdev,
297 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
298 			if (status)
299 				goto exit;
300 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 			if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 				status =
303 				    ql_wait_reg_rdy(qdev,
304 					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
305 				if (status)
306 					goto exit;
307 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 				status =
311 				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 						    MAC_ADDR_MR, 0);
313 				if (status)
314 					goto exit;
315 				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 			}
317 			break;
318 		}
319 	case MAC_ADDR_TYPE_VLAN:
320 	case MAC_ADDR_TYPE_MULTI_FLTR:
321 	default:
322 		netif_crit(qdev, ifup, qdev->ndev,
323 			   "Address type %d not yet supported.\n", type);
324 		status = -EPERM;
325 	}
326 exit:
327 	return status;
328 }
329 
330 /* Set up a MAC, multicast or VLAN address for the
331  * inbound frame matching.
332  */
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 			       u16 index)
335 {
336 	u32 offset = 0;
337 	int status = 0;
338 
339 	switch (type) {
340 	case MAC_ADDR_TYPE_MULTI_MAC:
341 		{
342 			u32 upper = (addr[0] << 8) | addr[1];
343 			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 					(addr[4] << 8) | (addr[5]);
345 
346 			status =
347 				ql_wait_reg_rdy(qdev,
348 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
349 			if (status)
350 				goto exit;
351 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 				(index << MAC_ADDR_IDX_SHIFT) |
353 				type | MAC_ADDR_E);
354 			ql_write32(qdev, MAC_ADDR_DATA, lower);
355 			status =
356 				ql_wait_reg_rdy(qdev,
357 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
358 			if (status)
359 				goto exit;
360 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 				(index << MAC_ADDR_IDX_SHIFT) |
362 				type | MAC_ADDR_E);
363 
364 			ql_write32(qdev, MAC_ADDR_DATA, upper);
365 			status =
366 				ql_wait_reg_rdy(qdev,
367 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 			if (status)
369 				goto exit;
370 			break;
371 		}
372 	case MAC_ADDR_TYPE_CAM_MAC:
373 		{
374 			u32 cam_output;
375 			u32 upper = (addr[0] << 8) | addr[1];
376 			u32 lower =
377 			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
378 			    (addr[5]);
379 			status =
380 			    ql_wait_reg_rdy(qdev,
381 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
382 			if (status)
383 				goto exit;
384 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 				   type);	/* type */
387 			ql_write32(qdev, MAC_ADDR_DATA, lower);
388 			status =
389 			    ql_wait_reg_rdy(qdev,
390 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
391 			if (status)
392 				goto exit;
393 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 				   type);	/* type */
396 			ql_write32(qdev, MAC_ADDR_DATA, upper);
397 			status =
398 			    ql_wait_reg_rdy(qdev,
399 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
400 			if (status)
401 				goto exit;
402 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
403 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
404 				   type);	/* type */
405 			/* This field should also include the queue id
406 			   and possibly the function id.  Right now we hardcode
407 			   the route field to NIC core.
408 			 */
409 			cam_output = (CAM_OUT_ROUTE_NIC |
410 				      (qdev->func <<
411 				       CAM_OUT_FUNC_SHIFT) |
412 				      (0 << CAM_OUT_CQ_ID_SHIFT));
413 			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 				cam_output |= CAM_OUT_RV;
415 			/* route to NIC core */
416 			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
417 			break;
418 		}
419 	case MAC_ADDR_TYPE_VLAN:
420 		{
421 			u32 enable_bit = *((u32 *) &addr[0]);
422 			/* For VLAN, the addr actually holds a bit that
423 			 * either enables or disables the vlan id we are
424 			 * addressing. It's either MAC_ADDR_E on or off.
425 			 * That's bit-27 we're talking about.
426 			 */
427 			status =
428 			    ql_wait_reg_rdy(qdev,
429 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
430 			if (status)
431 				goto exit;
432 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
433 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
434 				   type |	/* type */
435 				   enable_bit);	/* enable/disable */
436 			break;
437 		}
438 	case MAC_ADDR_TYPE_MULTI_FLTR:
439 	default:
440 		netif_crit(qdev, ifup, qdev->ndev,
441 			   "Address type %d not yet supported.\n", type);
442 		status = -EPERM;
443 	}
444 exit:
445 	return status;
446 }
447 
448 /* Set or clear MAC address in hardware. We sometimes
449  * have to clear it to prevent wrong frame routing
450  * especially in a bonding environment.
451  */
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
453 {
454 	int status;
455 	char zero_mac_addr[ETH_ALEN];
456 	char *addr;
457 
458 	if (set) {
459 		addr = &qdev->current_mac_addr[0];
460 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 			     "Set Mac addr %pM\n", addr);
462 	} else {
463 		eth_zero_addr(zero_mac_addr);
464 		addr = &zero_mac_addr[0];
465 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 			     "Clearing MAC address\n");
467 	}
468 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
469 	if (status)
470 		return status;
471 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 	if (status)
475 		netif_err(qdev, ifup, qdev->ndev,
476 			  "Failed to init mac address.\n");
477 	return status;
478 }
479 
480 void ql_link_on(struct ql_adapter *qdev)
481 {
482 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 	netif_carrier_on(qdev->ndev);
484 	ql_set_mac_addr(qdev, 1);
485 }
486 
487 void ql_link_off(struct ql_adapter *qdev)
488 {
489 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 	netif_carrier_off(qdev->ndev);
491 	ql_set_mac_addr(qdev, 0);
492 }
493 
494 /* Get a specific frame routing value from the CAM.
495  * Used for debug and reg dump.
496  */
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
498 {
499 	int status = 0;
500 
501 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
502 	if (status)
503 		goto exit;
504 
505 	ql_write32(qdev, RT_IDX,
506 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
508 	if (status)
509 		goto exit;
510 	*value = ql_read32(qdev, RT_DATA);
511 exit:
512 	return status;
513 }
514 
515 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
516  * to route different frame types to various inbound queues.  We send broadcast/
517  * multicast/error frames to the default queue for slow handling,
518  * and CAM hit/RSS frames to the fast handling queues.
519  */
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
521 			      int enable)
522 {
523 	int status = -EINVAL; /* Return error if no mask match. */
524 	u32 value = 0;
525 
526 	switch (mask) {
527 	case RT_IDX_CAM_HIT:
528 		{
529 			value = RT_IDX_DST_CAM_Q |	/* dest */
530 			    RT_IDX_TYPE_NICQ |	/* type */
531 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
532 			break;
533 		}
534 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
535 		{
536 			value = RT_IDX_DST_DFLT_Q |	/* dest */
537 			    RT_IDX_TYPE_NICQ |	/* type */
538 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
539 			break;
540 		}
541 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
542 		{
543 			value = RT_IDX_DST_DFLT_Q |	/* dest */
544 			    RT_IDX_TYPE_NICQ |	/* type */
545 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
546 			break;
547 		}
548 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 		{
550 			value = RT_IDX_DST_DFLT_Q | /* dest */
551 				RT_IDX_TYPE_NICQ | /* type */
552 				(RT_IDX_IP_CSUM_ERR_SLOT <<
553 				RT_IDX_IDX_SHIFT); /* index */
554 			break;
555 		}
556 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 		{
558 			value = RT_IDX_DST_DFLT_Q | /* dest */
559 				RT_IDX_TYPE_NICQ | /* type */
560 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 				RT_IDX_IDX_SHIFT); /* index */
562 			break;
563 		}
564 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
565 		{
566 			value = RT_IDX_DST_DFLT_Q |	/* dest */
567 			    RT_IDX_TYPE_NICQ |	/* type */
568 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
569 			break;
570 		}
571 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
572 		{
573 			value = RT_IDX_DST_DFLT_Q |	/* dest */
574 			    RT_IDX_TYPE_NICQ |	/* type */
575 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
576 			break;
577 		}
578 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
579 		{
580 			value = RT_IDX_DST_DFLT_Q |	/* dest */
581 			    RT_IDX_TYPE_NICQ |	/* type */
582 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
583 			break;
584 		}
585 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
586 		{
587 			value = RT_IDX_DST_RSS |	/* dest */
588 			    RT_IDX_TYPE_NICQ |	/* type */
589 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
590 			break;
591 		}
592 	case 0:		/* Clear the E-bit on an entry. */
593 		{
594 			value = RT_IDX_DST_DFLT_Q |	/* dest */
595 			    RT_IDX_TYPE_NICQ |	/* type */
596 			    (index << RT_IDX_IDX_SHIFT);/* index */
597 			break;
598 		}
599 	default:
600 		netif_err(qdev, ifup, qdev->ndev,
601 			  "Mask type %d not yet supported.\n", mask);
602 		status = -EPERM;
603 		goto exit;
604 	}
605 
606 	if (value) {
607 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
608 		if (status)
609 			goto exit;
610 		value |= (enable ? RT_IDX_E : 0);
611 		ql_write32(qdev, RT_IDX, value);
612 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
613 	}
614 exit:
615 	return status;
616 }
617 
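/* Set the global interrupt enable bit; the upper 16 bits carry the
 * write mask for INTR_EN_EI.
 */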
618 static void ql_enable_interrupts(struct ql_adapter *qdev)
619 {
620 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
621 }
622 
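/* Clear the global interrupt enable bit by writing only its mask. */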
623 static void ql_disable_interrupts(struct ql_adapter *qdev)
624 {
625 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
626 }
627 
628 /* If we're running with multiple MSI-X vectors then we enable on the fly.
629  * Otherwise, we may have multiple outstanding workers and don't want to
630  * enable until the last one finishes. In this case, the irq_cnt gets
631  * incremented every time we queue a worker and decremented every time
632  * a worker finishes.  Once it hits zero we enable the interrupt.
633  */
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
635 {
636 	u32 var = 0;
637 	unsigned long hw_flags = 0;
638 	struct intr_context *ctx = qdev->intr_context + intr;
639 
640 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 		/* Always enable if we're MSIX multi interrupts and
642 		 * it's not the default (zeroeth) interrupt.
643 		 */
644 		ql_write32(qdev, INTR_EN,
645 			   ctx->intr_en_mask);
646 		var = ql_read32(qdev, STS);
647 		return var;
648 	}
649 
650 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 	if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 		ql_write32(qdev, INTR_EN,
653 			   ctx->intr_en_mask);
654 		var = ql_read32(qdev, STS);
655 	}
656 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
657 	return var;
658 }
659 
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
661 {
662 	u32 var = 0;
663 	struct intr_context *ctx;
664 
665 	/* HW disables for us if we're MSIX multi interrupts and
666 	 * it's not the default (zeroeth) interrupt.
667 	 */
668 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
669 		return 0;
670 
671 	ctx = qdev->intr_context + intr;
672 	spin_lock(&qdev->hw_lock);
673 	if (!atomic_read(&ctx->irq_cnt)) {
674 		ql_write32(qdev, INTR_EN,
675 		ctx->intr_dis_mask);
676 		var = ql_read32(qdev, STS);
677 	}
678 	atomic_inc(&ctx->irq_cnt);
679 	spin_unlock(&qdev->hw_lock);
680 	return var;
681 }
682 
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
684 {
685 	int i;
686 	for (i = 0; i < qdev->intr_count; i++) {
687 		/* The enable call does an atomic_dec_and_test
688 		 * and enables only if the result is zero.
689 		 * So we precharge it here.
690 		 */
691 		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 			i == 0))
693 			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 		ql_enable_completion_interrupt(qdev, i);
695 	}
696 
697 }
698 
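/* Validate a flash image by checking its 4-character signature and
 * verifying that the 16-bit words checksum to zero.
 */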
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
700 {
701 	int status, i;
702 	u16 csum = 0;
703 	__le16 *flash = (__le16 *)&qdev->flash;
704 
705 	status = strncmp((char *)&qdev->flash, str, 4);
706 	if (status) {
707 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
708 		return	status;
709 	}
710 
711 	for (i = 0; i < size; i++)
712 		csum += le16_to_cpu(*flash++);
713 
714 	if (csum)
715 		netif_err(qdev, ifup, qdev->ndev,
716 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
717 
718 	return csum;
719 }
720 
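/* Read one 32-bit word of flash at the given dword offset.  The result
 * is left in flash (__le32) byte order.
 */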
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
722 {
723 	int status = 0;
724 	/* wait for reg to come ready */
725 	status = ql_wait_reg_rdy(qdev,
726 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
727 	if (status)
728 		goto exit;
729 	/* set up for reg read */
730 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 	/* wait for reg to come ready */
732 	status = ql_wait_reg_rdy(qdev,
733 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
734 	if (status)
735 		goto exit;
736 	/* This data is stored on flash as an array of
737 	 * __le32.  Since ql_read32() returns cpu endian
738 	 * we need to swap it back.
739 	 */
740 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
741 exit:
742 	return status;
743 }
744 
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
746 {
747 	u32 i, size;
748 	int status;
749 	__le32 *p = (__le32 *)&qdev->flash;
750 	u32 offset;
751 	u8 mac_addr[6];
752 
753 	/* Get flash offset for function and adjust
754 	 * for dword access.
755 	 */
756 	if (!qdev->port)
757 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 	else
759 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760 
761 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
762 		return -ETIMEDOUT;
763 
764 	size = sizeof(struct flash_params_8000) / sizeof(u32);
765 	for (i = 0; i < size; i++, p++) {
766 		status = ql_read_flash_word(qdev, i+offset, p);
767 		if (status) {
768 			netif_err(qdev, ifup, qdev->ndev,
769 				  "Error reading flash.\n");
770 			goto exit;
771 		}
772 	}
773 
774 	status = ql_validate_flash(qdev,
775 			sizeof(struct flash_params_8000) / sizeof(u16),
776 			"8000");
777 	if (status) {
778 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
779 		status = -EINVAL;
780 		goto exit;
781 	}
782 
783 	/* Extract either manufacturer or BOFM modified
784 	 * MAC address.
785 	 */
786 	if (qdev->flash.flash_params_8000.data_type1 == 2)
787 		memcpy(mac_addr,
788 			qdev->flash.flash_params_8000.mac_addr1,
789 			qdev->ndev->addr_len);
790 	else
791 		memcpy(mac_addr,
792 			qdev->flash.flash_params_8000.mac_addr,
793 			qdev->ndev->addr_len);
794 
795 	if (!is_valid_ether_addr(mac_addr)) {
796 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
797 		status = -EINVAL;
798 		goto exit;
799 	}
800 
801 	memcpy(qdev->ndev->dev_addr,
802 		mac_addr,
803 		qdev->ndev->addr_len);
804 
805 exit:
806 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
807 	return status;
808 }
809 
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
811 {
812 	int i;
813 	int status;
814 	__le32 *p = (__le32 *)&qdev->flash;
815 	u32 offset = 0;
816 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
817 
818 	/* Second function's parameters follow the first
819 	 * function's.
820 	 */
821 	if (qdev->port)
822 		offset = size;
823 
824 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
825 		return -ETIMEDOUT;
826 
827 	for (i = 0; i < size; i++, p++) {
828 		status = ql_read_flash_word(qdev, i+offset, p);
829 		if (status) {
830 			netif_err(qdev, ifup, qdev->ndev,
831 				  "Error reading flash.\n");
832 			goto exit;
833 		}
834 
835 	}
836 
837 	status = ql_validate_flash(qdev,
838 			sizeof(struct flash_params_8012) / sizeof(u16),
839 			"8012");
840 	if (status) {
841 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
842 		status = -EINVAL;
843 		goto exit;
844 	}
845 
846 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
847 		status = -EINVAL;
848 		goto exit;
849 	}
850 
851 	memcpy(qdev->ndev->dev_addr,
852 		qdev->flash.flash_params_8012.mac_addr,
853 		qdev->ndev->addr_len);
854 
855 exit:
856 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
857 	return status;
858 }
859 
860 /* xgmac registers are located behind the xgmac_addr and xgmac_data
861  * register pair.  Each read/write requires us to wait for the ready
862  * bit before reading/writing the data.
863  */
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
865 {
866 	int status;
867 	/* wait for reg to come ready */
868 	status = ql_wait_reg_rdy(qdev,
869 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
870 	if (status)
871 		return status;
872 	/* write the data to the data reg */
873 	ql_write32(qdev, XGMAC_DATA, data);
874 	/* trigger the write */
875 	ql_write32(qdev, XGMAC_ADDR, reg);
876 	return status;
877 }
878 
879 /* xgmac registers are located behind the xgmac_addr and xgmac_data
880  * register pair.  Each read/write requires us to wait for the ready
881  * bit before reading/writing the data.
882  */
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
884 {
885 	int status = 0;
886 	/* wait for reg to come ready */
887 	status = ql_wait_reg_rdy(qdev,
888 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
889 	if (status)
890 		goto exit;
891 	/* set up for reg read */
892 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 	/* wait for reg to come ready */
894 	status = ql_wait_reg_rdy(qdev,
895 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
896 	if (status)
897 		goto exit;
898 	/* get the data */
899 	*data = ql_read32(qdev, XGMAC_DATA);
900 exit:
901 	return status;
902 }
903 
904 /* This is used for reading the 64-bit statistics regs. */
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
906 {
907 	int status = 0;
908 	u32 hi = 0;
909 	u32 lo = 0;
910 
911 	status = ql_read_xgmac_reg(qdev, reg, &lo);
912 	if (status)
913 		goto exit;
914 
915 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
916 	if (status)
917 		goto exit;
918 
919 	*data = (u64) lo | ((u64) hi << 32);
920 
921 exit:
922 	return status;
923 }
924 
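/* 8000 series port init.  Query the MPI firmware version and state,
 * then kick off the worker that sets up the TX/RX frame sizes.
 */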
925 static int ql_8000_port_initialize(struct ql_adapter *qdev)
926 {
927 	int status;
928 	/*
929 	 * Get MPI firmware version for driver banner
930 	 * and ethtool info.
931 	 */
932 	status = ql_mb_about_fw(qdev);
933 	if (status)
934 		goto exit;
935 	status = ql_mb_get_fw_state(qdev);
936 	if (status)
937 		goto exit;
938 	/* Wake up a worker to get/set the TX/RX frame sizes. */
939 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
940 exit:
941 	return status;
942 }
943 
944 /* Take the MAC Core out of reset.
945  * Enable statistics counting.
946  * Take the transmitter/receiver out of reset.
947  * This functionality may be done in the MPI firmware at a
948  * later date.
949  */
950 static int ql_8012_port_initialize(struct ql_adapter *qdev)
951 {
952 	int status = 0;
953 	u32 data;
954 
955 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 		/* Another function has the semaphore, so
957 		 * wait for the port init bit to come ready.
958 		 */
959 		netif_info(qdev, link, qdev->ndev,
960 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 		if (status) {
963 			netif_crit(qdev, link, qdev->ndev,
964 				   "Port initialize timed out.\n");
965 		}
966 		return status;
967 	}
968 
969 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
970 	/* Set the core reset. */
971 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
972 	if (status)
973 		goto end;
974 	data |= GLOBAL_CFG_RESET;
975 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
976 	if (status)
977 		goto end;
978 
979 	/* Clear the core reset and turn on jumbo for receiver. */
980 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
981 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
982 	data |= GLOBAL_CFG_TX_STAT_EN;
983 	data |= GLOBAL_CFG_RX_STAT_EN;
984 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
985 	if (status)
986 		goto end;
987 
988 	/* Enable transmitter, and clear its reset. */
989 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
990 	if (status)
991 		goto end;
992 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
993 	data |= TX_CFG_EN;	/* Enable the transmitter. */
994 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
995 	if (status)
996 		goto end;
997 
998 	/* Enable receiver and clear its reset. */
999 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000 	if (status)
1001 		goto end;
1002 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1003 	data |= RX_CFG_EN;	/* Enable the receiver. */
1004 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005 	if (status)
1006 		goto end;
1007 
1008 	/* Turn on jumbo. */
1009 	status =
1010 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011 	if (status)
1012 		goto end;
1013 	status =
1014 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015 	if (status)
1016 		goto end;
1017 
1018 	/* Signal to the world that the port is enabled.        */
1019 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020 end:
1021 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022 	return status;
1023 }
1024 
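/* Size in bytes of the "master" page allocation that large buffer
 * chunks are carved from (one or more pages, per lbq_buf_order).
 */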
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026 {
1027 	return PAGE_SIZE << qdev->lbq_buf_order;
1028 }
1029 
1030 /* Get the next large buffer. */
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032 {
1033 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 	rx_ring->lbq_curr_idx++;
1035 	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 		rx_ring->lbq_curr_idx = 0;
1037 	rx_ring->lbq_free_cnt++;
1038 	return lbq_desc;
1039 }
1040 
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 		struct rx_ring *rx_ring)
1043 {
1044 	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045 
1046 	pci_dma_sync_single_for_cpu(qdev->pdev,
1047 					dma_unmap_addr(lbq_desc, mapaddr),
1048 				    rx_ring->lbq_buf_size,
1049 					PCI_DMA_FROMDEVICE);
1050 
1051 	/* If it's the last chunk of our master page then
1052 	 * we unmap it.
1053 	 */
1054 	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 					== ql_lbq_block_size(qdev))
1056 		pci_unmap_page(qdev->pdev,
1057 				lbq_desc->p.pg_chunk.map,
1058 				ql_lbq_block_size(qdev),
1059 				PCI_DMA_FROMDEVICE);
1060 	return lbq_desc;
1061 }
1062 
1063 /* Get the next small buffer. */
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065 {
1066 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 	rx_ring->sbq_curr_idx++;
1068 	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 		rx_ring->sbq_curr_idx = 0;
1070 	rx_ring->sbq_free_cnt++;
1071 	return sbq_desc;
1072 }
1073 
1074 /* Update an rx ring index. */
1075 static void ql_update_cq(struct rx_ring *rx_ring)
1076 {
1077 	rx_ring->cnsmr_idx++;
1078 	rx_ring->curr_entry++;
1079 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 		rx_ring->cnsmr_idx = 0;
1081 		rx_ring->curr_entry = rx_ring->cq_base;
1082 	}
1083 }
1084 
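/* Tell the chip how far we have advanced the completion queue by
 * writing the new consumer index to the doorbell register.
 */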
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086 {
1087 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088 }
1089 
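/* Hand out the next lbq_buf_size chunk of the current master page,
 * allocating and DMA-mapping a fresh page when the old one is used up.
 */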
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 						struct bq_desc *lbq_desc)
1092 {
1093 	if (!rx_ring->pg_chunk.page) {
1094 		u64 map;
1095 		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096 						GFP_ATOMIC,
1097 						qdev->lbq_buf_order);
1098 		if (unlikely(!rx_ring->pg_chunk.page)) {
1099 			netif_err(qdev, drv, qdev->ndev,
1100 				  "page allocation failed.\n");
1101 			return -ENOMEM;
1102 		}
1103 		rx_ring->pg_chunk.offset = 0;
1104 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105 					0, ql_lbq_block_size(qdev),
1106 					PCI_DMA_FROMDEVICE);
1107 		if (pci_dma_mapping_error(qdev->pdev, map)) {
1108 			__free_pages(rx_ring->pg_chunk.page,
1109 					qdev->lbq_buf_order);
1110 			rx_ring->pg_chunk.page = NULL;
1111 			netif_err(qdev, drv, qdev->ndev,
1112 				  "PCI mapping failed.\n");
1113 			return -ENOMEM;
1114 		}
1115 		rx_ring->pg_chunk.map = map;
1116 		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117 	}
1118 
1119 	/* Copy the current master pg_chunk info
1120 	 * to the current descriptor.
1121 	 */
1122 	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123 
1124 	/* Adjust the master page chunk for next
1125 	 * buffer get.
1126 	 */
1127 	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128 	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129 		rx_ring->pg_chunk.page = NULL;
1130 		lbq_desc->p.pg_chunk.last_flag = 1;
1131 	} else {
1132 		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133 		get_page(rx_ring->pg_chunk.page);
1134 		lbq_desc->p.pg_chunk.last_flag = 0;
1135 	}
1136 	return 0;
1137 }
1138 /* Process (refill) a large buffer queue. */
1139 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140 {
1141 	u32 clean_idx = rx_ring->lbq_clean_idx;
1142 	u32 start_idx = clean_idx;
1143 	struct bq_desc *lbq_desc;
1144 	u64 map;
1145 	int i;
1146 
1147 	while (rx_ring->lbq_free_cnt > 32) {
1148 		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150 				     "lbq: try cleaning clean_idx = %d.\n",
1151 				     clean_idx);
1152 			lbq_desc = &rx_ring->lbq[clean_idx];
1153 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154 				rx_ring->lbq_clean_idx = clean_idx;
1155 				netif_err(qdev, ifup, qdev->ndev,
1156 						"Could not get a page chunk, i=%d, clean_idx =%d .\n",
1157 						i, clean_idx);
1158 				return;
1159 			}
1160 
1161 			map = lbq_desc->p.pg_chunk.map +
1162 				lbq_desc->p.pg_chunk.offset;
1163 			dma_unmap_addr_set(lbq_desc, mapaddr, map);
1164 			dma_unmap_len_set(lbq_desc, maplen,
1165 					rx_ring->lbq_buf_size);
1166 			*lbq_desc->addr = cpu_to_le64(map);
1167 
1168 			pci_dma_sync_single_for_device(qdev->pdev, map,
1169 						rx_ring->lbq_buf_size,
1170 						PCI_DMA_FROMDEVICE);
1171 			clean_idx++;
1172 			if (clean_idx == rx_ring->lbq_len)
1173 				clean_idx = 0;
1174 		}
1175 
1176 		rx_ring->lbq_clean_idx = clean_idx;
1177 		rx_ring->lbq_prod_idx += 16;
1178 		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 			rx_ring->lbq_prod_idx = 0;
1180 		rx_ring->lbq_free_cnt -= 16;
1181 	}
1182 
1183 	if (start_idx != clean_idx) {
1184 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185 			     "lbq: updating prod idx = %d.\n",
1186 			     rx_ring->lbq_prod_idx);
1187 		ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 				rx_ring->lbq_prod_idx_db_reg);
1189 	}
1190 }
1191 
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194 {
1195 	u32 clean_idx = rx_ring->sbq_clean_idx;
1196 	u32 start_idx = clean_idx;
1197 	struct bq_desc *sbq_desc;
1198 	u64 map;
1199 	int i;
1200 
1201 	while (rx_ring->sbq_free_cnt > 16) {
1202 		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203 			sbq_desc = &rx_ring->sbq[clean_idx];
1204 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205 				     "sbq: try cleaning clean_idx = %d.\n",
1206 				     clean_idx);
1207 			if (sbq_desc->p.skb == NULL) {
1208 				netif_printk(qdev, rx_status, KERN_DEBUG,
1209 					     qdev->ndev,
1210 					     "sbq: getting new skb for index %d.\n",
1211 					     sbq_desc->index);
1212 				sbq_desc->p.skb =
1213 				    netdev_alloc_skb(qdev->ndev,
1214 						     SMALL_BUFFER_SIZE);
1215 				if (sbq_desc->p.skb == NULL) {
1216 					rx_ring->sbq_clean_idx = clean_idx;
1217 					return;
1218 				}
1219 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 				map = pci_map_single(qdev->pdev,
1221 						     sbq_desc->p.skb->data,
1222 						     rx_ring->sbq_buf_size,
1223 						     PCI_DMA_FROMDEVICE);
1224 				if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 					netif_err(qdev, ifup, qdev->ndev,
1226 						  "PCI mapping failed.\n");
1227 					rx_ring->sbq_clean_idx = clean_idx;
1228 					dev_kfree_skb_any(sbq_desc->p.skb);
1229 					sbq_desc->p.skb = NULL;
1230 					return;
1231 				}
1232 				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 				dma_unmap_len_set(sbq_desc, maplen,
1234 						  rx_ring->sbq_buf_size);
1235 				*sbq_desc->addr = cpu_to_le64(map);
1236 			}
1237 
1238 			clean_idx++;
1239 			if (clean_idx == rx_ring->sbq_len)
1240 				clean_idx = 0;
1241 		}
1242 		rx_ring->sbq_clean_idx = clean_idx;
1243 		rx_ring->sbq_prod_idx += 16;
1244 		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 			rx_ring->sbq_prod_idx = 0;
1246 		rx_ring->sbq_free_cnt -= 16;
1247 	}
1248 
1249 	if (start_idx != clean_idx) {
1250 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 			     "sbq: updating prod idx = %d.\n",
1252 			     rx_ring->sbq_prod_idx);
1253 		ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 				rx_ring->sbq_prod_idx_db_reg);
1255 	}
1256 }
1257 
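/* Refill both the small and large buffer queues for this rx ring. */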
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 				    struct rx_ring *rx_ring)
1260 {
1261 	ql_update_sbq(qdev, rx_ring);
1262 	ql_update_lbq(qdev, rx_ring);
1263 }
1264 
1265 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266  * fails at some stage, or from the interrupt when a tx completes.
1267  */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 			  struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271 	int i;
1272 	for (i = 0; i < mapped; i++) {
1273 		if (i == 0 || (i == 7 && mapped > 7)) {
1274 			/*
1275 			 * Unmap the skb->data area, or the
1276 			 * external sglist (AKA the Outbound
1277 			 * Address List (OAL)).
1278 			 * If it's the zeroeth element, then it's
1279 			 * the skb->data area.  If it's the 7th
1280 			 * element and there is more than 6 frags,
1281 			 * then it's an OAL.
1282 			 */
1283 			if (i == 7) {
1284 				netif_printk(qdev, tx_done, KERN_DEBUG,
1285 					     qdev->ndev,
1286 					     "unmapping OAL area.\n");
1287 			}
1288 			pci_unmap_single(qdev->pdev,
1289 					 dma_unmap_addr(&tx_ring_desc->map[i],
1290 							mapaddr),
1291 					 dma_unmap_len(&tx_ring_desc->map[i],
1292 						       maplen),
1293 					 PCI_DMA_TODEVICE);
1294 		} else {
1295 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 				     "unmapping frag %d.\n", i);
1297 			pci_unmap_page(qdev->pdev,
1298 				       dma_unmap_addr(&tx_ring_desc->map[i],
1299 						      mapaddr),
1300 				       dma_unmap_len(&tx_ring_desc->map[i],
1301 						     maplen), PCI_DMA_TODEVICE);
1302 		}
1303 	}
1304 
1305 }
1306 
1307 /* Map the buffers for this transmit.  This will return
1308  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309  */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 		       struct ob_mac_iocb_req *mac_iocb_ptr,
1312 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314 	int len = skb_headlen(skb);
1315 	dma_addr_t map;
1316 	int frag_idx, err, map_idx = 0;
1317 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1319 
1320 	if (frag_cnt) {
1321 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 			     "frag_cnt = %d.\n", frag_cnt);
1323 	}
1324 	/*
1325 	 * Map the skb buffer first.
1326 	 */
1327 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328 
1329 	err = pci_dma_mapping_error(qdev->pdev, map);
1330 	if (err) {
1331 		netif_err(qdev, tx_queued, qdev->ndev,
1332 			  "PCI mapping failed with error: %d\n", err);
1333 
1334 		return NETDEV_TX_BUSY;
1335 	}
1336 
1337 	tbd->len = cpu_to_le32(len);
1338 	tbd->addr = cpu_to_le64(map);
1339 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341 	map_idx++;
1342 
1343 	/*
1344 	 * This loop fills the remainder of the 8 address descriptors
1345 	 * in the IOCB.  If there are more than 7 fragments, then the
1346 	 * eighth address desc will point to an external list (OAL).
1347 	 * When this happens, the remainder of the frags will be stored
1348 	 * in this list.
1349 	 */
1350 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352 		tbd++;
1353 		if (frag_idx == 6 && frag_cnt > 7) {
1354 			/* Let's tack on an sglist.
1355 			 * Our control block will now
1356 			 * look like this:
1357 			 * iocb->seg[0] = skb->data
1358 			 * iocb->seg[1] = frag[0]
1359 			 * iocb->seg[2] = frag[1]
1360 			 * iocb->seg[3] = frag[2]
1361 			 * iocb->seg[4] = frag[3]
1362 			 * iocb->seg[5] = frag[4]
1363 			 * iocb->seg[6] = frag[5]
1364 			 * iocb->seg[7] = ptr to OAL (external sglist)
1365 			 * oal->seg[0] = frag[6]
1366 			 * oal->seg[1] = frag[7]
1367 			 * oal->seg[2] = frag[8]
1368 			 * oal->seg[3] = frag[9]
1369 			 * oal->seg[4] = frag[10]
1370 			 *      etc...
1371 			 */
1372 			/* Tack on the OAL in the eighth segment of IOCB. */
1373 			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374 					     sizeof(struct oal),
1375 					     PCI_DMA_TODEVICE);
1376 			err = pci_dma_mapping_error(qdev->pdev, map);
1377 			if (err) {
1378 				netif_err(qdev, tx_queued, qdev->ndev,
1379 					  "PCI mapping outbound address list with error: %d\n",
1380 					  err);
1381 				goto map_error;
1382 			}
1383 
1384 			tbd->addr = cpu_to_le64(map);
1385 			/*
1386 			 * The length is the number of fragments
1387 			 * that remain to be mapped times the length
1388 			 * of our sglist (OAL).
1389 			 */
1390 			tbd->len =
1391 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394 					   map);
1395 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 					  sizeof(struct oal));
1397 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398 			map_idx++;
1399 		}
1400 
1401 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402 				       DMA_TO_DEVICE);
1403 
1404 		err = dma_mapping_error(&qdev->pdev->dev, map);
1405 		if (err) {
1406 			netif_err(qdev, tx_queued, qdev->ndev,
1407 				  "PCI mapping frags failed with error: %d.\n",
1408 				  err);
1409 			goto map_error;
1410 		}
1411 
1412 		tbd->addr = cpu_to_le64(map);
1413 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 				  skb_frag_size(frag));
1417 
1418 	}
1419 	/* Save the number of segments we've mapped. */
1420 	tx_ring_desc->map_cnt = map_idx;
1421 	/* Terminate the last segment. */
1422 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 	return NETDEV_TX_OK;
1424 
1425 map_error:
1426 	/*
1427 	 * If the first frag mapping failed, then i will be zero.
1428 	 * This causes the unmap of the skb->data area.  Otherwise
1429 	 * we pass in the number of frags that mapped successfully
1430 	 * so they can be unmapped.
1431 	 */
1432 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 	return NETDEV_TX_BUSY;
1434 }
1435 
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438 				 struct rx_ring *rx_ring)
1439 {
1440 	struct nic_stats *stats = &qdev->nic_stats;
1441 
1442 	stats->rx_err_count++;
1443 	rx_ring->rx_errors++;
1444 
1445 	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446 	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447 		stats->rx_code_err++;
1448 		break;
1449 	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450 		stats->rx_oversize_err++;
1451 		break;
1452 	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453 		stats->rx_undersize_err++;
1454 		break;
1455 	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456 		stats->rx_preamble_err++;
1457 		break;
1458 	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459 		stats->rx_frame_len_err++;
1460 		break;
1461 	case IB_MAC_IOCB_RSP_ERR_CRC:
1462 		stats->rx_crc_err++;
		break;
1463 	default:
1464 		break;
1465 	}
1466 }
1467 
1468 /**
1469  * ql_update_mac_hdr_len - helper routine to update the mac header length
1470  * based on vlan tags if present
1471  */
1472 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473 				  struct ib_mac_iocb_rsp *ib_mac_rsp,
1474 				  void *page, size_t *len)
1475 {
1476 	u16 *tags;
1477 
1478 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479 		return;
1480 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481 		tags = (u16 *)page;
1482 		/* Look for stacked vlan tags in ethertype field */
1483 		if (tags[6] == ETH_P_8021Q &&
1484 		    tags[8] == ETH_P_8021Q)
1485 			*len += 2 * VLAN_HLEN;
1486 		else
1487 			*len += VLAN_HLEN;
1488 	}
1489 }
1490 
1491 /* Process an inbound completion from an rx ring. */
1492 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493 					struct rx_ring *rx_ring,
1494 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 					u32 length,
1496 					u16 vlan_id)
1497 {
1498 	struct sk_buff *skb;
1499 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500 	struct napi_struct *napi = &rx_ring->napi;
1501 
1502 	/* Frame error, so drop the packet. */
1503 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505 		put_page(lbq_desc->p.pg_chunk.page);
1506 		return;
1507 	}
1508 	napi->dev = qdev->ndev;
1509 
1510 	skb = napi_get_frags(napi);
1511 	if (!skb) {
1512 		netif_err(qdev, drv, qdev->ndev,
1513 			  "Couldn't get an skb, exiting.\n");
1514 		rx_ring->rx_dropped++;
1515 		put_page(lbq_desc->p.pg_chunk.page);
1516 		return;
1517 	}
1518 	prefetch(lbq_desc->p.pg_chunk.va);
1519 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520 			     lbq_desc->p.pg_chunk.page,
1521 			     lbq_desc->p.pg_chunk.offset,
1522 			     length);
1523 
1524 	skb->len += length;
1525 	skb->data_len += length;
1526 	skb->truesize += length;
1527 	skb_shinfo(skb)->nr_frags++;
1528 
1529 	rx_ring->rx_packets++;
1530 	rx_ring->rx_bytes += length;
1531 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 	skb_record_rx_queue(skb, rx_ring->cq_id);
1533 	if (vlan_id != 0xffff)
1534 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535 	napi_gro_frags(napi);
1536 }
1537 
1538 /* Process an inbound completion from an rx ring. */
1539 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 					struct rx_ring *rx_ring,
1541 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1542 					u32 length,
1543 					u16 vlan_id)
1544 {
1545 	struct net_device *ndev = qdev->ndev;
1546 	struct sk_buff *skb = NULL;
1547 	void *addr;
1548 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549 	struct napi_struct *napi = &rx_ring->napi;
1550 	size_t hlen = ETH_HLEN;
1551 
1552 	skb = netdev_alloc_skb(ndev, length);
1553 	if (!skb) {
1554 		rx_ring->rx_dropped++;
1555 		put_page(lbq_desc->p.pg_chunk.page);
1556 		return;
1557 	}
1558 
1559 	addr = lbq_desc->p.pg_chunk.va;
1560 	prefetch(addr);
1561 
1562 	/* Frame error, so drop the packet. */
1563 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565 		goto err_out;
1566 	}
1567 
1568 	/* Update the MAC header length*/
1569 	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570 
1571 	/* The max framesize filter on this chip is set higher than
1572 	 * MTU since FCoE uses 2k frames.
1573 	 */
1574 	if (skb->len > ndev->mtu + hlen) {
1575 		netif_err(qdev, drv, qdev->ndev,
1576 			  "Segment too small, dropping.\n");
1577 		rx_ring->rx_dropped++;
1578 		goto err_out;
1579 	}
1580 	memcpy(skb_put(skb, hlen), addr, hlen);
1581 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583 		     length);
1584 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585 				lbq_desc->p.pg_chunk.offset + hlen,
1586 				length - hlen);
1587 	skb->len += length - hlen;
1588 	skb->data_len += length - hlen;
1589 	skb->truesize += length - hlen;
1590 
1591 	rx_ring->rx_packets++;
1592 	rx_ring->rx_bytes += skb->len;
1593 	skb->protocol = eth_type_trans(skb, ndev);
1594 	skb_checksum_none_assert(skb);
1595 
1596 	if ((ndev->features & NETIF_F_RXCSUM) &&
1597 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598 		/* TCP frame. */
1599 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601 				     "TCP checksum done!\n");
1602 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605 			/* Unfragmented ipv4 UDP frame. */
1606 			struct iphdr *iph =
1607 				(struct iphdr *)((u8 *)addr + hlen);
1608 			if (!(iph->frag_off &
1609 				htons(IP_MF|IP_OFFSET))) {
1610 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1611 				netif_printk(qdev, rx_status, KERN_DEBUG,
1612 					     qdev->ndev,
1613 					     "UDP checksum done!\n");
1614 			}
1615 		}
1616 	}
1617 
1618 	skb_record_rx_queue(skb, rx_ring->cq_id);
1619 	if (vlan_id != 0xffff)
1620 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622 		napi_gro_receive(napi, skb);
1623 	else
1624 		netif_receive_skb(skb);
1625 	return;
1626 err_out:
1627 	dev_kfree_skb_any(skb);
1628 	put_page(lbq_desc->p.pg_chunk.page);
1629 }
1630 
1631 /* Process an inbound completion from an rx ring. */
1632 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633 					struct rx_ring *rx_ring,
1634 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1635 					u32 length,
1636 					u16 vlan_id)
1637 {
1638 	struct net_device *ndev = qdev->ndev;
1639 	struct sk_buff *skb = NULL;
1640 	struct sk_buff *new_skb = NULL;
1641 	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642 
1643 	skb = sbq_desc->p.skb;
1644 	/* Allocate new_skb and copy */
1645 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646 	if (new_skb == NULL) {
1647 		rx_ring->rx_dropped++;
1648 		return;
1649 	}
1650 	skb_reserve(new_skb, NET_IP_ALIGN);
1651 	memcpy(skb_put(new_skb, length), skb->data, length);
1652 	skb = new_skb;
1653 
1654 	/* Frame error, so drop the packet. */
1655 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1656 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1657 		dev_kfree_skb_any(skb);
1658 		return;
1659 	}
1660 
1661 	/* loopback self test for ethtool */
1662 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1663 		ql_check_lb_frame(qdev, skb);
1664 		dev_kfree_skb_any(skb);
1665 		return;
1666 	}
1667 
1668 	/* The max framesize filter on this chip is set higher than
1669 	 * MTU since FCoE uses 2k frames.
1670 	 */
1671 	if (skb->len > ndev->mtu + ETH_HLEN) {
1672 		dev_kfree_skb_any(skb);
1673 		rx_ring->rx_dropped++;
1674 		return;
1675 	}
1676 
1677 	prefetch(skb->data);
1678 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1679 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1680 			     "%s Multicast.\n",
1681 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1682 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1683 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1684 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1685 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1686 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1687 	}
1688 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1689 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 			     "Promiscuous Packet.\n");
1691 
1692 	rx_ring->rx_packets++;
1693 	rx_ring->rx_bytes += skb->len;
1694 	skb->protocol = eth_type_trans(skb, ndev);
1695 	skb_checksum_none_assert(skb);
1696 
1697 	/* If rx checksum is on, and there are no
1698 	 * csum or frame errors.
1699 	 */
1700 	if ((ndev->features & NETIF_F_RXCSUM) &&
1701 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1702 		/* TCP frame. */
1703 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1704 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705 				     "TCP checksum done!\n");
1706 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1707 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1708 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1709 			/* Unfragmented ipv4 UDP frame. */
1710 			struct iphdr *iph = (struct iphdr *) skb->data;
1711 			if (!(iph->frag_off &
1712 				htons(IP_MF|IP_OFFSET))) {
1713 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1714 				netif_printk(qdev, rx_status, KERN_DEBUG,
1715 					     qdev->ndev,
1716 					     "UDP checksum done!\n");
1717 			}
1718 		}
1719 	}
1720 
1721 	skb_record_rx_queue(skb, rx_ring->cq_id);
1722 	if (vlan_id != 0xffff)
1723 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1724 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1725 		napi_gro_receive(&rx_ring->napi, skb);
1726 	else
1727 		netif_receive_skb(skb);
1728 }
1729 
1730 static void ql_realign_skb(struct sk_buff *skb, int len)
1731 {
1732 	void *temp_addr = skb->data;
1733 
1734 	/* Undo the skb_reserve(skb,32) we did before
1735 	 * giving to hardware, and realign data on
1736 	 * a 2-byte boundary.
1737 	 */
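	/* For example, with QLGE_SB_PAD of 32 (the reserve noted above) and
	 * NET_IP_ALIGN typically 2, data and tail move back by 30 bytes
	 * before the payload is copied back into place, leaving the IP
	 * header aligned on a 2-byte boundary.
	 */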
1738 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1739 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1740 	skb_copy_to_linear_data(skb, temp_addr,
1741 		(unsigned int)len);
1742 }
1743 
1744 /*
1745  * This function builds an skb for the given inbound
1746  * completion.  It will be rewritten for readability in the near
1747  * future, but for now it works well.
1748  */
1749 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1750 				       struct rx_ring *rx_ring,
1751 				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1752 {
1753 	struct bq_desc *lbq_desc;
1754 	struct bq_desc *sbq_desc;
1755 	struct sk_buff *skb = NULL;
1756 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1757 	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1758 	size_t hlen = ETH_HLEN;
1759 
1760 	/*
1761 	 * Handle the header buffer if present.
1762 	 */
1763 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1764 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1765 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 			     "Header of %d bytes in small buffer.\n", hdr_len);
1767 		/*
1768 		 * Headers fit nicely into a small buffer.
1769 		 */
1770 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1771 		pci_unmap_single(qdev->pdev,
1772 				dma_unmap_addr(sbq_desc, mapaddr),
1773 				dma_unmap_len(sbq_desc, maplen),
1774 				PCI_DMA_FROMDEVICE);
1775 		skb = sbq_desc->p.skb;
1776 		ql_realign_skb(skb, hdr_len);
1777 		skb_put(skb, hdr_len);
1778 		sbq_desc->p.skb = NULL;
1779 	}
1780 
1781 	/*
1782 	 * Handle the data buffer(s).
1783 	 */
1784 	if (unlikely(!length)) {	/* Is there data too? */
1785 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786 			     "No Data buffer in this packet.\n");
1787 		return skb;
1788 	}
1789 
1790 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1791 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 				     "Headers in small, data of %d bytes in small, combine them.\n",
1794 				     length);
1795 			/*
1796 			 * Data is less than small buffer size so it's
1797 			 * stuffed in a small buffer.
1798 			 * For this case we append the data
1799 			 * from the "data" small buffer to the "header" small
1800 			 * buffer.
1801 			 */
1802 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1803 			pci_dma_sync_single_for_cpu(qdev->pdev,
1804 						    dma_unmap_addr
1805 						    (sbq_desc, mapaddr),
1806 						    dma_unmap_len
1807 						    (sbq_desc, maplen),
1808 						    PCI_DMA_FROMDEVICE);
1809 			memcpy(skb_put(skb, length),
1810 			       sbq_desc->p.skb->data, length);
1811 			pci_dma_sync_single_for_device(qdev->pdev,
1812 						       dma_unmap_addr
1813 						       (sbq_desc,
1814 							mapaddr),
1815 						       dma_unmap_len
1816 						       (sbq_desc,
1817 							maplen),
1818 						       PCI_DMA_FROMDEVICE);
1819 		} else {
1820 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 				     "%d bytes in a single small buffer.\n",
1822 				     length);
1823 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1824 			skb = sbq_desc->p.skb;
1825 			ql_realign_skb(skb, length);
1826 			skb_put(skb, length);
1827 			pci_unmap_single(qdev->pdev,
1828 					 dma_unmap_addr(sbq_desc,
1829 							mapaddr),
1830 					 dma_unmap_len(sbq_desc,
1831 						       maplen),
1832 					 PCI_DMA_FROMDEVICE);
1833 			sbq_desc->p.skb = NULL;
1834 		}
1835 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1836 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1837 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1838 				     "Header in small, %d bytes in large. Chain large to small!\n",
1839 				     length);
1840 			/*
1841 			 * The data is in a single large buffer.  We
1842 			 * chain it to the header buffer's skb and let
1843 			 * it rip.
1844 			 */
1845 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1846 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1847 				     "Chaining page at offset = %d, for %d bytes to skb.\n",
1848 				     lbq_desc->p.pg_chunk.offset, length);
1849 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1850 						lbq_desc->p.pg_chunk.offset,
1851 						length);
1852 			skb->len += length;
1853 			skb->data_len += length;
1854 			skb->truesize += length;
1855 		} else {
1856 			/*
1857 			 * The headers and data are in a single large buffer. We
1858 			 * copy it to a new skb and let it go. This can happen with
1859 			 * jumbo mtu on a non-TCP/UDP frame.
1860 			 */
1861 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1862 			skb = netdev_alloc_skb(qdev->ndev, length);
1863 			if (skb == NULL) {
1864 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1865 					     "No skb available, drop the packet.\n");
1866 				return NULL;
1867 			}
1868 			pci_unmap_page(qdev->pdev,
1869 				       dma_unmap_addr(lbq_desc,
1870 						      mapaddr),
1871 				       dma_unmap_len(lbq_desc, maplen),
1872 				       PCI_DMA_FROMDEVICE);
1873 			skb_reserve(skb, NET_IP_ALIGN);
1874 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1875 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1876 				     length);
1877 			skb_fill_page_desc(skb, 0,
1878 						lbq_desc->p.pg_chunk.page,
1879 						lbq_desc->p.pg_chunk.offset,
1880 						length);
1881 			skb->len += length;
1882 			skb->data_len += length;
1883 			skb->truesize += length;
1884 			length -= length;
1885 			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1886 					      lbq_desc->p.pg_chunk.va,
1887 					      &hlen);
1888 			__pskb_pull_tail(skb, hlen);
1889 		}
1890 	} else {
1891 		/*
1892 		 * The data is in a chain of large buffers
1893 		 * pointed to by a small buffer.  We loop
1894 		 * through and chain them to our small header
1895 		 * buffer's skb.
1896 		 * frags:  There are 18 max frags and our small
1897 		 *         buffer will hold 32 of them. The thing is,
1898 		 *         we'll use 3 max for our 9000 byte jumbo
1899 		 *         frames.  If the MTU goes up we could
1900 		 *         eventually be in trouble.
1901 		 */
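		/* As a rough illustration of the frag count: the loop below
		 * uses DIV_ROUND_UP(length, rx_ring->lbq_buf_size) chunks,
		 * so a 9000 byte jumbo frame consumes only a few of the 18
		 * available frag slots with the default large buffer size.
		 */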
1902 		int size, i = 0;
1903 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1904 		pci_unmap_single(qdev->pdev,
1905 				 dma_unmap_addr(sbq_desc, mapaddr),
1906 				 dma_unmap_len(sbq_desc, maplen),
1907 				 PCI_DMA_FROMDEVICE);
1908 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1909 			/*
1910 			 * This is a non TCP/UDP IP frame, so
1911 			 * the headers aren't split into a small
1912 			 * buffer.  We have to use the small buffer
1913 			 * that contains our sg list as our skb to
1914 			 * send upstairs. Copy the sg list here to
1915 			 * a local buffer and use it to find the
1916 			 * pages to chain.
1917 			 */
1918 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 				     "%d bytes of headers & data in chain of large.\n",
1920 				     length);
1921 			skb = sbq_desc->p.skb;
1922 			sbq_desc->p.skb = NULL;
1923 			skb_reserve(skb, NET_IP_ALIGN);
1924 		}
1925 		do {
1926 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1927 			size = (length < rx_ring->lbq_buf_size) ? length :
1928 				rx_ring->lbq_buf_size;
1929 
1930 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1931 				     "Adding page %d to skb for %d bytes.\n",
1932 				     i, size);
1933 			skb_fill_page_desc(skb, i,
1934 						lbq_desc->p.pg_chunk.page,
1935 						lbq_desc->p.pg_chunk.offset,
1936 						size);
1937 			skb->len += size;
1938 			skb->data_len += size;
1939 			skb->truesize += size;
1940 			length -= size;
1941 			i++;
1942 		} while (length > 0);
1943 		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1944 				      &hlen);
1945 		__pskb_pull_tail(skb, hlen);
1946 	}
1947 	return skb;
1948 }
1949 
1950 /* Process an inbound completion from an rx ring. */
1951 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1952 				   struct rx_ring *rx_ring,
1953 				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1954 				   u16 vlan_id)
1955 {
1956 	struct net_device *ndev = qdev->ndev;
1957 	struct sk_buff *skb = NULL;
1958 
1959 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1960 
1961 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1962 	if (unlikely(!skb)) {
1963 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1964 			     "No skb available, drop packet.\n");
1965 		rx_ring->rx_dropped++;
1966 		return;
1967 	}
1968 
1969 	/* Frame error, so drop the packet. */
1970 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1971 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1972 		dev_kfree_skb_any(skb);
1973 		return;
1974 	}
1975 
1976 	/* The max framesize filter on this chip is set higher than
1977 	 * MTU since FCoE uses 2k frames.
1978 	 */
1979 	if (skb->len > ndev->mtu + ETH_HLEN) {
1980 		dev_kfree_skb_any(skb);
1981 		rx_ring->rx_dropped++;
1982 		return;
1983 	}
1984 
1985 	/* loopback self test for ethtool */
1986 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1987 		ql_check_lb_frame(qdev, skb);
1988 		dev_kfree_skb_any(skb);
1989 		return;
1990 	}
1991 
1992 	prefetch(skb->data);
1993 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1994 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1995 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1996 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1997 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1998 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1999 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2000 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2001 		rx_ring->rx_multicast++;
2002 	}
2003 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2004 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2005 			     "Promiscuous Packet.\n");
2006 	}
2007 
2008 	skb->protocol = eth_type_trans(skb, ndev);
2009 	skb_checksum_none_assert(skb);
2010 
2011 	/* If rx checksum is on, and there are no
2012 	 * csum or frame errors.
2013 	 */
2014 	if ((ndev->features & NETIF_F_RXCSUM) &&
2015 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2016 		/* TCP frame. */
2017 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2018 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2019 				     "TCP checksum done!\n");
2020 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2021 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2022 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2023 		/* Unfragmented ipv4 UDP frame. */
2024 			struct iphdr *iph = (struct iphdr *) skb->data;
2025 			if (!(iph->frag_off &
2026 				htons(IP_MF|IP_OFFSET))) {
2027 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2028 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2029 					     "UDP checksum done!\n");
2030 			}
2031 		}
2032 	}
2033 
2034 	rx_ring->rx_packets++;
2035 	rx_ring->rx_bytes += skb->len;
2036 	skb_record_rx_queue(skb, rx_ring->cq_id);
2037 	if (vlan_id != 0xffff)
2038 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2039 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2040 		napi_gro_receive(&rx_ring->napi, skb);
2041 	else
2042 		netif_receive_skb(skb);
2043 }
2044 
2045 /* Process an inbound completion from an rx ring. */
2046 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047 					struct rx_ring *rx_ring,
2048 					struct ib_mac_iocb_rsp *ib_mac_rsp)
2049 {
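	/* Recover the VLAN tag from the completion only when the hardware
	 * actually stripped one (IB_MAC_IOCB_RSP_V) and VLAN RX offload is
	 * enabled; 0xffff is used further down as the "no tag" sentinel.
	 */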
2050 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2051 	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2052 			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2053 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
2054 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2055 
2056 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2057 
2058 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2059 		/* The data and headers are split into
2060 		 * separate buffers.
2061 		 */
2062 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2063 						vlan_id);
2064 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2065 		/* The data fit in a single small buffer.
2066 		 * Allocate a new skb, copy the data and
2067 		 * return the buffer to the free pool.
2068 		 */
2069 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2070 						length, vlan_id);
2071 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2072 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2073 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2074 		/* TCP packet in a page chunk that's been checksummed.
2075 		 * Tack it on to our GRO skb and let it go.
2076 		 */
2077 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2078 						length, vlan_id);
2079 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2080 		/* Non-TCP packet in a page chunk. Allocate an
2081 		 * skb, tack it on frags, and send it up.
2082 		 */
2083 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2084 						length, vlan_id);
2085 	} else {
2086 		/* Non-TCP/UDP large frames that span multiple buffers
2087 		 * can be processed correctly by the split frame logic.
2088 		 */
2089 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2090 						vlan_id);
2091 	}
2092 
2093 	return (unsigned long)length;
2094 }
2095 
2096 /* Process an outbound completion from an rx ring. */
2097 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2098 				   struct ob_mac_iocb_rsp *mac_rsp)
2099 {
2100 	struct tx_ring *tx_ring;
2101 	struct tx_ring_desc *tx_ring_desc;
2102 
2103 	QL_DUMP_OB_MAC_RSP(mac_rsp);
2104 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2106 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2107 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2108 	tx_ring->tx_packets++;
2109 	dev_kfree_skb(tx_ring_desc->skb);
2110 	tx_ring_desc->skb = NULL;
2111 
2112 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2113 					OB_MAC_IOCB_RSP_S |
2114 					OB_MAC_IOCB_RSP_L |
2115 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2116 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2117 			netif_warn(qdev, tx_done, qdev->ndev,
2118 				   "Total descriptor length did not match transfer length.\n");
2119 		}
2120 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2121 			netif_warn(qdev, tx_done, qdev->ndev,
2122 				   "Frame too short to be valid, not sent.\n");
2123 		}
2124 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2125 			netif_warn(qdev, tx_done, qdev->ndev,
2126 				   "Frame too long, but sent anyway.\n");
2127 		}
2128 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2129 			netif_warn(qdev, tx_done, qdev->ndev,
2130 				   "PCI backplane error. Frame not sent.\n");
2131 		}
2132 	}
2133 	atomic_inc(&tx_ring->tx_count);
2134 }
2135 
2136 /* Fire up a handler to reset the MPI processor. */
2137 void ql_queue_fw_error(struct ql_adapter *qdev)
2138 {
2139 	ql_link_off(qdev);
2140 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2141 }
2142 
2143 void ql_queue_asic_error(struct ql_adapter *qdev)
2144 {
2145 	ql_link_off(qdev);
2146 	ql_disable_interrupts(qdev);
2147 	/* Clear adapter up bit to signal the recovery
2148 	 * process that it shouldn't kill the reset worker
2149 	 * thread.
2150 	 */
2151 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2152 	/* Set the asic recovery bit to tell the reset process that we
2153 	 * are in fatal error recovery rather than a normal close.
2154 	 */
2155 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2156 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2157 }
2158 
2159 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2160 				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2161 {
2162 	switch (ib_ae_rsp->event) {
2163 	case MGMT_ERR_EVENT:
2164 		netif_err(qdev, rx_err, qdev->ndev,
2165 			  "Management Processor Fatal Error.\n");
2166 		ql_queue_fw_error(qdev);
2167 		return;
2168 
2169 	case CAM_LOOKUP_ERR_EVENT:
2170 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2171 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2172 		ql_queue_asic_error(qdev);
2173 		return;
2174 
2175 	case SOFT_ECC_ERROR_EVENT:
2176 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2177 		ql_queue_asic_error(qdev);
2178 		break;
2179 
2180 	case PCI_ERR_ANON_BUF_RD:
2181 		netdev_err(qdev->ndev, "PCI error occurred when reading "
2182 					"anonymous buffers from rx_ring %d.\n",
2183 					ib_ae_rsp->q_id);
2184 		ql_queue_asic_error(qdev);
2185 		break;
2186 
2187 	default:
2188 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2189 			  ib_ae_rsp->event);
2190 		ql_queue_asic_error(qdev);
2191 		break;
2192 	}
2193 }
2194 
2195 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2196 {
2197 	struct ql_adapter *qdev = rx_ring->qdev;
2198 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2199 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2200 	int count = 0;
2201 
2202 	struct tx_ring *tx_ring;
2203 	/* While there are entries in the completion queue. */
2204 	while (prod != rx_ring->cnsmr_idx) {
2205 
2206 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2208 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2209 
2210 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2211 		rmb();
2212 		switch (net_rsp->opcode) {
2213 
2214 		case OPCODE_OB_MAC_TSO_IOCB:
2215 		case OPCODE_OB_MAC_IOCB:
2216 			ql_process_mac_tx_intr(qdev, net_rsp);
2217 			break;
2218 		default:
2219 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2220 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2221 				     net_rsp->opcode);
2222 		}
2223 		count++;
2224 		ql_update_cq(rx_ring);
2225 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2226 	}
2227 	if (!net_rsp)
2228 		return 0;
2229 	ql_write_cq_idx(rx_ring);
2230 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2231 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2232 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2233 			/*
2234 			 * The queue got stopped because the tx_ring was full.
2235 			 * Wake it up, because it's now at least 25% empty.
2236 			 */
2237 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2238 	}
2239 
2240 	return count;
2241 }
2242 
2243 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2244 {
2245 	struct ql_adapter *qdev = rx_ring->qdev;
2246 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2247 	struct ql_net_rsp_iocb *net_rsp;
2248 	int count = 0;
2249 
2250 	/* While there are entries in the completion queue. */
2251 	while (prod != rx_ring->cnsmr_idx) {
2252 
2253 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2254 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2255 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2256 
2257 		net_rsp = rx_ring->curr_entry;
2258 		rmb();
2259 		switch (net_rsp->opcode) {
2260 		case OPCODE_IB_MAC_IOCB:
2261 			ql_process_mac_rx_intr(qdev, rx_ring,
2262 					       (struct ib_mac_iocb_rsp *)
2263 					       net_rsp);
2264 			break;
2265 
2266 		case OPCODE_IB_AE_IOCB:
2267 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2268 						net_rsp);
2269 			break;
2270 		default:
2271 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2272 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2273 				     net_rsp->opcode);
2274 			break;
2275 		}
2276 		count++;
2277 		ql_update_cq(rx_ring);
2278 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2279 		if (count == budget)
2280 			break;
2281 	}
2282 	ql_update_buffer_queues(qdev, rx_ring);
2283 	ql_write_cq_idx(rx_ring);
2284 	return count;
2285 }
2286 
2287 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2288 {
2289 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2290 	struct ql_adapter *qdev = rx_ring->qdev;
2291 	struct rx_ring *trx_ring;
2292 	int i, work_done = 0;
2293 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2294 
2295 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2296 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2297 
2298 	/* Service the TX rings first.  They start
2299 	 * right after the RSS rings. */
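	/* rx_ring[0..rss_ring_count-1] are the inbound RSS rings; the
	 * entries from rss_ring_count up to rx_ring_count-1 are outbound
	 * (TX) completion rings, which is why the loop starts there.
	 */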
2300 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2301 		trx_ring = &qdev->rx_ring[i];
2302 		/* If this TX completion ring belongs to this vector and
2303 		 * it's not empty then service it.
2304 		 */
2305 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2306 			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2307 					trx_ring->cnsmr_idx)) {
2308 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309 				     "%s: Servicing TX completion ring %d.\n",
2310 				     __func__, trx_ring->cq_id);
2311 			ql_clean_outbound_rx_ring(trx_ring);
2312 		}
2313 	}
2314 
2315 	/*
2316 	 * Now service the RSS ring if it's active.
2317 	 */
2318 	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2319 					rx_ring->cnsmr_idx) {
2320 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2321 			     "%s: Servicing RX completion ring %d.\n",
2322 			     __func__, rx_ring->cq_id);
2323 		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2324 	}
2325 
2326 	if (work_done < budget) {
2327 		napi_complete(napi);
2328 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2329 	}
2330 	return work_done;
2331 }
2332 
2333 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2334 {
2335 	struct ql_adapter *qdev = netdev_priv(ndev);
2336 
2337 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2338 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2339 				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2340 	} else {
2341 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2342 	}
2343 }
2344 
2345 /**
2346  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2347  * based on the features to enable/disable hardware vlan accel
2348  */
2349 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2350 					netdev_features_t features)
2351 {
2352 	struct ql_adapter *qdev = netdev_priv(ndev);
2353 	int status = 0;
2354 	bool need_restart = netif_running(ndev);
2355 
2356 	if (need_restart) {
2357 		status = ql_adapter_down(qdev);
2358 		if (status) {
2359 			netif_err(qdev, link, qdev->ndev,
2360 				  "Failed to bring down the adapter\n");
2361 			return status;
2362 		}
2363 	}
2364 
2365 	/* update the features with the recent change */
2366 	ndev->features = features;
2367 
2368 	if (need_restart) {
2369 		status = ql_adapter_up(qdev);
2370 		if (status) {
2371 			netif_err(qdev, link, qdev->ndev,
2372 				  "Failed to bring up the adapter\n");
2373 			return status;
2374 		}
2375 	}
2376 
2377 	return status;
2378 }
2379 
2380 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2381 	netdev_features_t features)
2382 {
2383 	int err;
2384 
2385 	/* Update the behavior of vlan accel in the adapter */
2386 	err = qlge_update_hw_vlan_features(ndev, features);
2387 	if (err)
2388 		return err;
2389 
2390 	return features;
2391 }
2392 
2393 static int qlge_set_features(struct net_device *ndev,
2394 	netdev_features_t features)
2395 {
2396 	netdev_features_t changed = ndev->features ^ features;
2397 
2398 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2399 		qlge_vlan_mode(ndev, features);
2400 
2401 	return 0;
2402 }
2403 
2404 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2405 {
2406 	u32 enable_bit = MAC_ADDR_E;
2407 	int err;
2408 
2409 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2410 				  MAC_ADDR_TYPE_VLAN, vid);
2411 	if (err)
2412 		netif_err(qdev, ifup, qdev->ndev,
2413 			  "Failed to init vlan address.\n");
2414 	return err;
2415 }
2416 
2417 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2418 {
2419 	struct ql_adapter *qdev = netdev_priv(ndev);
2420 	int status;
2421 	int err;
2422 
2423 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2424 	if (status)
2425 		return status;
2426 
2427 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2428 	set_bit(vid, qdev->active_vlans);
2429 
2430 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2431 
2432 	return err;
2433 }
2434 
2435 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2436 {
2437 	u32 enable_bit = 0;
2438 	int err;
2439 
2440 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2441 				  MAC_ADDR_TYPE_VLAN, vid);
2442 	if (err)
2443 		netif_err(qdev, ifup, qdev->ndev,
2444 			  "Failed to clear vlan address.\n");
2445 	return err;
2446 }
2447 
2448 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2449 {
2450 	struct ql_adapter *qdev = netdev_priv(ndev);
2451 	int status;
2452 	int err;
2453 
2454 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2455 	if (status)
2456 		return status;
2457 
2458 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2459 	clear_bit(vid, qdev->active_vlans);
2460 
2461 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2462 
2463 	return err;
2464 }
2465 
2466 static void qlge_restore_vlan(struct ql_adapter *qdev)
2467 {
2468 	int status;
2469 	u16 vid;
2470 
2471 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2472 	if (status)
2473 		return;
2474 
2475 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2476 		__qlge_vlan_rx_add_vid(qdev, vid);
2477 
2478 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2479 }
2480 
2481 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2482 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2483 {
2484 	struct rx_ring *rx_ring = dev_id;
2485 	napi_schedule(&rx_ring->napi);
2486 	return IRQ_HANDLED;
2487 }
2488 
2489 /* This handles a fatal error, MPI activity, and the default
2490  * rx_ring in an MSI-X multiple vector environment.
2491  * In an MSI/Legacy environment it also processes the rest of
2492  * the rx_rings.
2493  */
2494 static irqreturn_t qlge_isr(int irq, void *dev_id)
2495 {
2496 	struct rx_ring *rx_ring = dev_id;
2497 	struct ql_adapter *qdev = rx_ring->qdev;
2498 	struct intr_context *intr_context = &qdev->intr_context[0];
2499 	u32 var;
2500 	int work_done = 0;
2501 
2502 	spin_lock(&qdev->hw_lock);
2503 	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2504 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2505 			     "Shared Interrupt, Not ours!\n");
2506 		spin_unlock(&qdev->hw_lock);
2507 		return IRQ_NONE;
2508 	}
2509 	spin_unlock(&qdev->hw_lock);
2510 
2511 	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2512 
2513 	/*
2514 	 * Check for fatal error.
2515 	 */
2516 	if (var & STS_FE) {
2517 		ql_queue_asic_error(qdev);
2518 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2519 		var = ql_read32(qdev, ERR_STS);
2520 		netdev_err(qdev->ndev, "Resetting chip. "
2521 					"Error Status Register = 0x%x\n", var);
2522 		return IRQ_HANDLED;
2523 	}
2524 
2525 	/*
2526 	 * Check MPI processor activity.
2527 	 */
2528 	if ((var & STS_PI) &&
2529 		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2530 		/*
2531 		 * We've got an async event or mailbox completion.
2532 		 * Handle it and clear the source of the interrupt.
2533 		 */
2534 		netif_err(qdev, intr, qdev->ndev,
2535 			  "Got MPI processor interrupt.\n");
2536 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2537 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2538 		queue_delayed_work_on(smp_processor_id(),
2539 				qdev->workqueue, &qdev->mpi_work, 0);
2540 		work_done++;
2541 	}
2542 
2543 	/*
2544 	 * Get the bit-mask that shows the active queues for this
2545 	 * pass.  Compare it to the queues that this irq services
2546 	 * and call napi if there's a match.
2547 	 */
2548 	var = ql_read32(qdev, ISR1);
2549 	if (var & intr_context->irq_mask) {
2550 		netif_info(qdev, intr, qdev->ndev,
2551 			   "Waking handler for rx_ring[0].\n");
2552 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2553 		napi_schedule(&rx_ring->napi);
2554 		work_done++;
2555 	}
2556 	ql_enable_completion_interrupt(qdev, intr_context->intr);
2557 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2558 }
2559 
2560 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2561 {
2562 
2563 	if (skb_is_gso(skb)) {
2564 		int err;
2565 		__be16 l3_proto = vlan_get_protocol(skb);
2566 
2567 		err = skb_cow_head(skb, 0);
2568 		if (err < 0)
2569 			return err;
2570 
2571 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2572 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2573 		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2574 		mac_iocb_ptr->total_hdrs_len =
2575 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2576 		mac_iocb_ptr->net_trans_offset =
2577 		    cpu_to_le16(skb_network_offset(skb) |
2578 				skb_transport_offset(skb)
2579 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2580 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2581 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
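		/* Seed the TCP checksum with the pseudo-header sum (length
		 * 0) so the hardware can fill in the final checksum for
		 * each segment it generates.
		 */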
2582 		if (likely(l3_proto == htons(ETH_P_IP))) {
2583 			struct iphdr *iph = ip_hdr(skb);
2584 			iph->check = 0;
2585 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2586 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2587 								 iph->daddr, 0,
2588 								 IPPROTO_TCP,
2589 								 0);
2590 		} else if (l3_proto == htons(ETH_P_IPV6)) {
2591 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2592 			tcp_hdr(skb)->check =
2593 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2594 					     &ipv6_hdr(skb)->daddr,
2595 					     0, IPPROTO_TCP, 0);
2596 		}
2597 		return 1;
2598 	}
2599 	return 0;
2600 }
2601 
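/* Set up the IOCB for hardware TX checksum offload of an IPv4 TCP or UDP
 * frame: record the header offsets for the chip and seed the checksum
 * field with the pseudo-header sum so the hardware can insert the final
 * checksum.
 */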
2602 static void ql_hw_csum_setup(struct sk_buff *skb,
2603 			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2604 {
2605 	int len;
2606 	struct iphdr *iph = ip_hdr(skb);
2607 	__sum16 *check;
2608 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2609 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2610 	mac_iocb_ptr->net_trans_offset =
2611 		cpu_to_le16(skb_network_offset(skb) |
2612 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2613 
2614 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2615 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2616 	if (likely(iph->protocol == IPPROTO_TCP)) {
2617 		check = &(tcp_hdr(skb)->check);
2618 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2619 		mac_iocb_ptr->total_hdrs_len =
2620 		    cpu_to_le16(skb_transport_offset(skb) +
2621 				(tcp_hdr(skb)->doff << 2));
2622 	} else {
2623 		check = &(udp_hdr(skb)->check);
2624 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2625 		mac_iocb_ptr->total_hdrs_len =
2626 		    cpu_to_le16(skb_transport_offset(skb) +
2627 				sizeof(struct udphdr));
2628 	}
2629 	*check = ~csum_tcpudp_magic(iph->saddr,
2630 				    iph->daddr, len, iph->protocol, 0);
2631 }
2632 
2633 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2634 {
2635 	struct tx_ring_desc *tx_ring_desc;
2636 	struct ob_mac_iocb_req *mac_iocb_ptr;
2637 	struct ql_adapter *qdev = netdev_priv(ndev);
2638 	int tso;
2639 	struct tx_ring *tx_ring;
2640 	u32 tx_ring_idx = (u32) skb->queue_mapping;
2641 
2642 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2643 
2644 	if (skb_padto(skb, ETH_ZLEN))
2645 		return NETDEV_TX_OK;
2646 
2647 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2648 		netif_info(qdev, tx_queued, qdev->ndev,
2649 			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2650 			   __func__, tx_ring_idx);
2651 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2652 		tx_ring->tx_errors++;
2653 		return NETDEV_TX_BUSY;
2654 	}
2655 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2656 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2657 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2658 
2659 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2660 	mac_iocb_ptr->tid = tx_ring_desc->index;
2661 	/* We use the upper 32-bits to store the tx queue for this IO.
2662 	 * When we get the completion we can use it to establish the context.
2663 	 */
2664 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2665 	tx_ring_desc->skb = skb;
2666 
2667 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2668 
2669 	if (skb_vlan_tag_present(skb)) {
2670 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2671 			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2672 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2673 		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2674 	}
2675 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2676 	if (tso < 0) {
2677 		dev_kfree_skb_any(skb);
2678 		return NETDEV_TX_OK;
2679 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2680 		ql_hw_csum_setup(skb,
2681 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2682 	}
2683 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2684 			NETDEV_TX_OK) {
2685 		netif_err(qdev, tx_queued, qdev->ndev,
2686 			  "Could not map the segments.\n");
2687 		tx_ring->tx_errors++;
2688 		return NETDEV_TX_BUSY;
2689 	}
2690 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2691 	tx_ring->prod_idx++;
2692 	if (tx_ring->prod_idx == tx_ring->wq_len)
2693 		tx_ring->prod_idx = 0;
2694 	wmb();
2695 
2696 	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2697 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2698 		     "tx queued, slot %d, len %d\n",
2699 		     tx_ring->prod_idx, skb->len);
2700 
2701 	atomic_dec(&tx_ring->tx_count);
2702 
2703 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2704 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2705 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2706 			/*
2707 			 * The queue got stopped because the tx_ring was full.
2708 			 * Wake it up, because it's now at least 25% empty.
2709 			 */
2710 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2711 	}
2712 	return NETDEV_TX_OK;
2713 }
2714 
2715 
2716 static void ql_free_shadow_space(struct ql_adapter *qdev)
2717 {
2718 	if (qdev->rx_ring_shadow_reg_area) {
2719 		pci_free_consistent(qdev->pdev,
2720 				    PAGE_SIZE,
2721 				    qdev->rx_ring_shadow_reg_area,
2722 				    qdev->rx_ring_shadow_reg_dma);
2723 		qdev->rx_ring_shadow_reg_area = NULL;
2724 	}
2725 	if (qdev->tx_ring_shadow_reg_area) {
2726 		pci_free_consistent(qdev->pdev,
2727 				    PAGE_SIZE,
2728 				    qdev->tx_ring_shadow_reg_area,
2729 				    qdev->tx_ring_shadow_reg_dma);
2730 		qdev->tx_ring_shadow_reg_area = NULL;
2731 	}
2732 }
2733 
2734 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2735 {
2736 	qdev->rx_ring_shadow_reg_area =
2737 		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2738 				      &qdev->rx_ring_shadow_reg_dma);
2739 	if (qdev->rx_ring_shadow_reg_area == NULL) {
2740 		netif_err(qdev, ifup, qdev->ndev,
2741 			  "Allocation of RX shadow space failed.\n");
2742 		return -ENOMEM;
2743 	}
2744 
2745 	qdev->tx_ring_shadow_reg_area =
2746 		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2747 				      &qdev->tx_ring_shadow_reg_dma);
2748 	if (qdev->tx_ring_shadow_reg_area == NULL) {
2749 		netif_err(qdev, ifup, qdev->ndev,
2750 			  "Allocation of TX shadow space failed.\n");
2751 		goto err_wqp_sh_area;
2752 	}
2753 	return 0;
2754 
2755 err_wqp_sh_area:
2756 	pci_free_consistent(qdev->pdev,
2757 			    PAGE_SIZE,
2758 			    qdev->rx_ring_shadow_reg_area,
2759 			    qdev->rx_ring_shadow_reg_dma);
2760 	return -ENOMEM;
2761 }
2762 
2763 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2764 {
2765 	struct tx_ring_desc *tx_ring_desc;
2766 	int i;
2767 	struct ob_mac_iocb_req *mac_iocb_ptr;
2768 
2769 	mac_iocb_ptr = tx_ring->wq_base;
2770 	tx_ring_desc = tx_ring->q;
2771 	for (i = 0; i < tx_ring->wq_len; i++) {
2772 		tx_ring_desc->index = i;
2773 		tx_ring_desc->skb = NULL;
2774 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2775 		mac_iocb_ptr++;
2776 		tx_ring_desc++;
2777 	}
2778 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2779 }
2780 
2781 static void ql_free_tx_resources(struct ql_adapter *qdev,
2782 				 struct tx_ring *tx_ring)
2783 {
2784 	if (tx_ring->wq_base) {
2785 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2786 				    tx_ring->wq_base, tx_ring->wq_base_dma);
2787 		tx_ring->wq_base = NULL;
2788 	}
2789 	kfree(tx_ring->q);
2790 	tx_ring->q = NULL;
2791 }
2792 
2793 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2794 				 struct tx_ring *tx_ring)
2795 {
2796 	tx_ring->wq_base =
2797 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2798 				 &tx_ring->wq_base_dma);
2799 
2800 	if ((tx_ring->wq_base == NULL) ||
2801 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2802 		goto pci_alloc_err;
2803 
2804 	tx_ring->q =
2805 	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2806 	if (tx_ring->q == NULL)
2807 		goto err;
2808 
2809 	return 0;
2810 err:
2811 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2812 			    tx_ring->wq_base, tx_ring->wq_base_dma);
2813 	tx_ring->wq_base = NULL;
2814 pci_alloc_err:
2815 	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2816 	return -ENOMEM;
2817 }
2818 
2819 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2820 {
2821 	struct bq_desc *lbq_desc;
2822 
2823 	uint32_t curr_idx, clean_idx;
2824 
2825 	curr_idx = rx_ring->lbq_curr_idx;
2826 	clean_idx = rx_ring->lbq_clean_idx;
2827 	while (curr_idx != clean_idx) {
2828 		lbq_desc = &rx_ring->lbq[curr_idx];
2829 
2830 		if (lbq_desc->p.pg_chunk.last_flag) {
2831 			pci_unmap_page(qdev->pdev,
2832 				lbq_desc->p.pg_chunk.map,
2833 				ql_lbq_block_size(qdev),
2834 				       PCI_DMA_FROMDEVICE);
2835 			lbq_desc->p.pg_chunk.last_flag = 0;
2836 		}
2837 
2838 		put_page(lbq_desc->p.pg_chunk.page);
2839 		lbq_desc->p.pg_chunk.page = NULL;
2840 
2841 		if (++curr_idx == rx_ring->lbq_len)
2842 			curr_idx = 0;
2843 
2844 	}
2845 	if (rx_ring->pg_chunk.page) {
2846 		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2847 			ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2848 		put_page(rx_ring->pg_chunk.page);
2849 		rx_ring->pg_chunk.page = NULL;
2850 	}
2851 }
2852 
2853 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2854 {
2855 	int i;
2856 	struct bq_desc *sbq_desc;
2857 
2858 	for (i = 0; i < rx_ring->sbq_len; i++) {
2859 		sbq_desc = &rx_ring->sbq[i];
2860 		if (sbq_desc == NULL) {
2861 			netif_err(qdev, ifup, qdev->ndev,
2862 				  "sbq_desc %d is NULL.\n", i);
2863 			return;
2864 		}
2865 		if (sbq_desc->p.skb) {
2866 			pci_unmap_single(qdev->pdev,
2867 					 dma_unmap_addr(sbq_desc, mapaddr),
2868 					 dma_unmap_len(sbq_desc, maplen),
2869 					 PCI_DMA_FROMDEVICE);
2870 			dev_kfree_skb(sbq_desc->p.skb);
2871 			sbq_desc->p.skb = NULL;
2872 		}
2873 	}
2874 }
2875 
2876 /* Free all large and small rx buffers associated
2877  * with the completion queues for this device.
2878  */
2879 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2880 {
2881 	int i;
2882 	struct rx_ring *rx_ring;
2883 
2884 	for (i = 0; i < qdev->rx_ring_count; i++) {
2885 		rx_ring = &qdev->rx_ring[i];
2886 		if (rx_ring->lbq)
2887 			ql_free_lbq_buffers(qdev, rx_ring);
2888 		if (rx_ring->sbq)
2889 			ql_free_sbq_buffers(qdev, rx_ring);
2890 	}
2891 }
2892 
2893 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2894 {
2895 	struct rx_ring *rx_ring;
2896 	int i;
2897 
2898 	for (i = 0; i < qdev->rx_ring_count; i++) {
2899 		rx_ring = &qdev->rx_ring[i];
2900 		if (rx_ring->type != TX_Q)
2901 			ql_update_buffer_queues(qdev, rx_ring);
2902 	}
2903 }
2904 
2905 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2906 				struct rx_ring *rx_ring)
2907 {
2908 	int i;
2909 	struct bq_desc *lbq_desc;
2910 	__le64 *bq = rx_ring->lbq_base;
2911 
2912 	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2913 	for (i = 0; i < rx_ring->lbq_len; i++) {
2914 		lbq_desc = &rx_ring->lbq[i];
2915 		memset(lbq_desc, 0, sizeof(*lbq_desc));
2916 		lbq_desc->index = i;
2917 		lbq_desc->addr = bq;
2918 		bq++;
2919 	}
2920 }
2921 
2922 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2923 				struct rx_ring *rx_ring)
2924 {
2925 	int i;
2926 	struct bq_desc *sbq_desc;
2927 	__le64 *bq = rx_ring->sbq_base;
2928 
2929 	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2930 	for (i = 0; i < rx_ring->sbq_len; i++) {
2931 		sbq_desc = &rx_ring->sbq[i];
2932 		memset(sbq_desc, 0, sizeof(*sbq_desc));
2933 		sbq_desc->index = i;
2934 		sbq_desc->addr = bq;
2935 		bq++;
2936 	}
2937 }
2938 
2939 static void ql_free_rx_resources(struct ql_adapter *qdev,
2940 				 struct rx_ring *rx_ring)
2941 {
2942 	/* Free the small buffer queue. */
2943 	if (rx_ring->sbq_base) {
2944 		pci_free_consistent(qdev->pdev,
2945 				    rx_ring->sbq_size,
2946 				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2947 		rx_ring->sbq_base = NULL;
2948 	}
2949 
2950 	/* Free the small buffer queue control blocks. */
2951 	kfree(rx_ring->sbq);
2952 	rx_ring->sbq = NULL;
2953 
2954 	/* Free the large buffer queue. */
2955 	if (rx_ring->lbq_base) {
2956 		pci_free_consistent(qdev->pdev,
2957 				    rx_ring->lbq_size,
2958 				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2959 		rx_ring->lbq_base = NULL;
2960 	}
2961 
2962 	/* Free the large buffer queue control blocks. */
2963 	kfree(rx_ring->lbq);
2964 	rx_ring->lbq = NULL;
2965 
2966 	/* Free the rx queue. */
2967 	if (rx_ring->cq_base) {
2968 		pci_free_consistent(qdev->pdev,
2969 				    rx_ring->cq_size,
2970 				    rx_ring->cq_base, rx_ring->cq_base_dma);
2971 		rx_ring->cq_base = NULL;
2972 	}
2973 }
2974 
2975 /* Allocate queues and buffers for this completion queue based
2976  * on the values in the parameter structure. */
2977 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2978 				 struct rx_ring *rx_ring)
2979 {
2980 
2981 	/*
2982 	 * Allocate the completion queue for this rx_ring.
2983 	 */
2984 	rx_ring->cq_base =
2985 	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2986 				 &rx_ring->cq_base_dma);
2987 
2988 	if (rx_ring->cq_base == NULL) {
2989 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2990 		return -ENOMEM;
2991 	}
2992 
2993 	if (rx_ring->sbq_len) {
2994 		/*
2995 		 * Allocate small buffer queue.
2996 		 */
2997 		rx_ring->sbq_base =
2998 		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2999 					 &rx_ring->sbq_base_dma);
3000 
3001 		if (rx_ring->sbq_base == NULL) {
3002 			netif_err(qdev, ifup, qdev->ndev,
3003 				  "Small buffer queue allocation failed.\n");
3004 			goto err_mem;
3005 		}
3006 
3007 		/*
3008 		 * Allocate small buffer queue control blocks.
3009 		 */
3010 		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3011 					     sizeof(struct bq_desc),
3012 					     GFP_KERNEL);
3013 		if (rx_ring->sbq == NULL)
3014 			goto err_mem;
3015 
3016 		ql_init_sbq_ring(qdev, rx_ring);
3017 	}
3018 
3019 	if (rx_ring->lbq_len) {
3020 		/*
3021 		 * Allocate large buffer queue.
3022 		 */
3023 		rx_ring->lbq_base =
3024 		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3025 					 &rx_ring->lbq_base_dma);
3026 
3027 		if (rx_ring->lbq_base == NULL) {
3028 			netif_err(qdev, ifup, qdev->ndev,
3029 				  "Large buffer queue allocation failed.\n");
3030 			goto err_mem;
3031 		}
3032 		/*
3033 		 * Allocate large buffer queue control blocks.
3034 		 */
3035 		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3036 					     sizeof(struct bq_desc),
3037 					     GFP_KERNEL);
3038 		if (rx_ring->lbq == NULL)
3039 			goto err_mem;
3040 
3041 		ql_init_lbq_ring(qdev, rx_ring);
3042 	}
3043 
3044 	return 0;
3045 
3046 err_mem:
3047 	ql_free_rx_resources(qdev, rx_ring);
3048 	return -ENOMEM;
3049 }
3050 
3051 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3052 {
3053 	struct tx_ring *tx_ring;
3054 	struct tx_ring_desc *tx_ring_desc;
3055 	int i, j;
3056 
3057 	/*
3058 	 * Loop through all queues and free
3059 	 * any resources.
3060 	 */
3061 	for (j = 0; j < qdev->tx_ring_count; j++) {
3062 		tx_ring = &qdev->tx_ring[j];
3063 		for (i = 0; i < tx_ring->wq_len; i++) {
3064 			tx_ring_desc = &tx_ring->q[i];
3065 			if (tx_ring_desc && tx_ring_desc->skb) {
3066 				netif_err(qdev, ifdown, qdev->ndev,
3067 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
3068 					  tx_ring_desc->skb, j,
3069 					  tx_ring_desc->index);
3070 				ql_unmap_send(qdev, tx_ring_desc,
3071 					      tx_ring_desc->map_cnt);
3072 				dev_kfree_skb(tx_ring_desc->skb);
3073 				tx_ring_desc->skb = NULL;
3074 			}
3075 		}
3076 	}
3077 }
3078 
3079 static void ql_free_mem_resources(struct ql_adapter *qdev)
3080 {
3081 	int i;
3082 
3083 	for (i = 0; i < qdev->tx_ring_count; i++)
3084 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3085 	for (i = 0; i < qdev->rx_ring_count; i++)
3086 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3087 	ql_free_shadow_space(qdev);
3088 }
3089 
3090 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3091 {
3092 	int i;
3093 
3094 	/* Allocate space for our shadow registers and such. */
3095 	if (ql_alloc_shadow_space(qdev))
3096 		return -ENOMEM;
3097 
3098 	for (i = 0; i < qdev->rx_ring_count; i++) {
3099 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3100 			netif_err(qdev, ifup, qdev->ndev,
3101 				  "RX resource allocation failed.\n");
3102 			goto err_mem;
3103 		}
3104 	}
3105 	/* Allocate tx queue resources */
3106 	for (i = 0; i < qdev->tx_ring_count; i++) {
3107 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3108 			netif_err(qdev, ifup, qdev->ndev,
3109 				  "TX resource allocation failed.\n");
3110 			goto err_mem;
3111 		}
3112 	}
3113 	return 0;
3114 
3115 err_mem:
3116 	ql_free_mem_resources(qdev);
3117 	return -ENOMEM;
3118 }
3119 
3120 /* Set up the rx ring control block and pass it to the chip.
3121  * The control block is defined as
3122  * "Completion Queue Initialization Control Block", or cqicb.
3123  */
3124 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3125 {
3126 	struct cqicb *cqicb = &rx_ring->cqicb;
3127 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3128 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3129 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3130 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3131 	void __iomem *doorbell_area =
3132 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3133 	int err = 0;
3134 	u16 bq_len;
3135 	u64 tmp;
3136 	__le64 *base_indirect_ptr;
3137 	int page_entries;
3138 
3139 	/* Set up the shadow registers for this ring. */
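	/* The per-ring shadow area holds the completion queue producer
	 * index (one u64) followed by the indirection lists of doorbell
	 * pages for the large and small buffer queues.
	 */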
3140 	rx_ring->prod_idx_sh_reg = shadow_reg;
3141 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3142 	*rx_ring->prod_idx_sh_reg = 0;
3143 	shadow_reg += sizeof(u64);
3144 	shadow_reg_dma += sizeof(u64);
3145 	rx_ring->lbq_base_indirect = shadow_reg;
3146 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3147 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3148 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3149 	rx_ring->sbq_base_indirect = shadow_reg;
3150 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3151 
3152 	/* PCI doorbell mem area + 0x00 for consumer index register */
3153 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3154 	rx_ring->cnsmr_idx = 0;
3155 	rx_ring->curr_entry = rx_ring->cq_base;
3156 
3157 	/* PCI doorbell mem area + 0x04 for valid register */
3158 	rx_ring->valid_db_reg = doorbell_area + 0x04;
3159 
3160 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3161 	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3162 
3163 	/* PCI doorbell mem area + 0x1c */
3164 	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3165 
3166 	memset((void *)cqicb, 0, sizeof(struct cqicb));
3167 	cqicb->msix_vect = rx_ring->irq;
3168 
3169 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3170 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3171 
3172 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3173 
3174 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3175 
3176 	/*
3177 	 * Set up the control block load flags.
3178 	 */
3179 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3180 	    FLAGS_LV |		/* Load MSI-X vector */
3181 	    FLAGS_LI;		/* Load irq delay values */
3182 	if (rx_ring->lbq_len) {
3183 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3184 		tmp = (u64)rx_ring->lbq_base_dma;
3185 		base_indirect_ptr = rx_ring->lbq_base_indirect;
3186 		page_entries = 0;
3187 		do {
3188 			*base_indirect_ptr = cpu_to_le64(tmp);
3189 			tmp += DB_PAGE_SIZE;
3190 			base_indirect_ptr++;
3191 			page_entries++;
3192 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3193 		cqicb->lbq_addr =
3194 		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3195 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3196 			(u16) rx_ring->lbq_buf_size;
3197 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3198 		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3199 			(u16) rx_ring->lbq_len;
3200 		cqicb->lbq_len = cpu_to_le16(bq_len);
3201 		rx_ring->lbq_prod_idx = 0;
3202 		rx_ring->lbq_curr_idx = 0;
3203 		rx_ring->lbq_clean_idx = 0;
3204 		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3205 	}
3206 	if (rx_ring->sbq_len) {
3207 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3208 		tmp = (u64)rx_ring->sbq_base_dma;
3209 		base_indirect_ptr = rx_ring->sbq_base_indirect;
3210 		page_entries = 0;
3211 		do {
3212 			*base_indirect_ptr = cpu_to_le64(tmp);
3213 			tmp += DB_PAGE_SIZE;
3214 			base_indirect_ptr++;
3215 			page_entries++;
3216 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3217 		cqicb->sbq_addr =
3218 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3219 		cqicb->sbq_buf_size =
3220 		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3221 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3222 			(u16) rx_ring->sbq_len;
3223 		cqicb->sbq_len = cpu_to_le16(bq_len);
3224 		rx_ring->sbq_prod_idx = 0;
3225 		rx_ring->sbq_curr_idx = 0;
3226 		rx_ring->sbq_clean_idx = 0;
3227 		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3228 	}
3229 	switch (rx_ring->type) {
3230 	case TX_Q:
3231 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3232 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3233 		break;
3234 	case RX_Q:
3235 		/* Inbound completion handling rx_rings run in
3236 		 * separate NAPI contexts.
3237 		 */
3238 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3239 			       64);
3240 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3241 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3242 		break;
3243 	default:
3244 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3245 			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
3246 	}
3247 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3248 			   CFG_LCQ, rx_ring->cq_id);
3249 	if (err) {
3250 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3251 		return err;
3252 	}
3253 	return err;
3254 }
3255 
3256 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3257 {
3258 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3259 	void __iomem *doorbell_area =
3260 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3261 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3262 	    (tx_ring->wq_id * sizeof(u64));
3263 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3264 	    (tx_ring->wq_id * sizeof(u64));
3265 	int err = 0;
3266 
3267 	/*
3268 	 * Assign doorbell registers for this tx_ring.
3269 	 */
3270 	/* TX PCI doorbell mem area for tx producer index */
3271 	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3272 	tx_ring->prod_idx = 0;
3273 	/* TX PCI doorbell mem area + 0x04 */
3274 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3275 
3276 	/*
3277 	 * Assign shadow registers for this tx_ring.
3278 	 */
3279 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3280 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3281 
3282 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3283 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3284 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3285 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3286 	wqicb->rid = 0;
3287 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3288 
3289 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3290 
3291 	ql_init_tx_ring(qdev, tx_ring);
3292 
3293 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3294 			   (u16) tx_ring->wq_id);
3295 	if (err) {
3296 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3297 		return err;
3298 	}
3299 	return err;
3300 }
3301 
3302 static void ql_disable_msix(struct ql_adapter *qdev)
3303 {
3304 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3305 		pci_disable_msix(qdev->pdev);
3306 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3307 		kfree(qdev->msi_x_entry);
3308 		qdev->msi_x_entry = NULL;
3309 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3310 		pci_disable_msi(qdev->pdev);
3311 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3312 	}
3313 }
3314 
3315 /* We start by trying to get the number of vectors
3316  * stored in qdev->intr_count. If we don't get that
3317  * many then we reduce the count and try again.
3318  */
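/* Note: pci_enable_msix_range() returns the number of vectors actually
 * allocated (anywhere between 1 and qdev->intr_count) or a negative errno,
 * so intr_count is simply updated with whatever the allocation yielded.
 */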
3319 static void ql_enable_msix(struct ql_adapter *qdev)
3320 {
3321 	int i, err;
3322 
3323 	/* Get the MSIX vectors. */
3324 	if (qlge_irq_type == MSIX_IRQ) {
3325 		/* Try to alloc space for the msix struct,
3326 		 * if it fails then go to MSI/legacy.
3327 		 */
3328 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3329 					    sizeof(struct msix_entry),
3330 					    GFP_KERNEL);
3331 		if (!qdev->msi_x_entry) {
3332 			qlge_irq_type = MSI_IRQ;
3333 			goto msi;
3334 		}
3335 
3336 		for (i = 0; i < qdev->intr_count; i++)
3337 			qdev->msi_x_entry[i].entry = i;
3338 
3339 		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3340 					    1, qdev->intr_count);
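		/* A positive return is the number of vectors actually
		 * granted (anywhere from 1 up to intr_count); a negative
		 * value means not even a single MSI-X vector was available.
		 */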
3341 		if (err < 0) {
3342 			kfree(qdev->msi_x_entry);
3343 			qdev->msi_x_entry = NULL;
3344 			netif_warn(qdev, ifup, qdev->ndev,
3345 				   "MSI-X Enable failed, trying MSI.\n");
3346 			qlge_irq_type = MSI_IRQ;
3347 		} else {
3348 			qdev->intr_count = err;
3349 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3350 			netif_info(qdev, ifup, qdev->ndev,
3351 				   "MSI-X Enabled, got %d vectors.\n",
3352 				   qdev->intr_count);
3353 			return;
3354 		}
3355 	}
3356 msi:
3357 	qdev->intr_count = 1;
3358 	if (qlge_irq_type == MSI_IRQ) {
3359 		if (!pci_enable_msi(qdev->pdev)) {
3360 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3361 			netif_info(qdev, ifup, qdev->ndev,
3362 				   "Running with MSI interrupts.\n");
3363 			return;
3364 		}
3365 	}
3366 	qlge_irq_type = LEG_IRQ;
3367 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3368 		     "Running with legacy interrupts.\n");
3369 }
3370 
3371 /* Each vector services 1 RSS ring and 1 or more
3372  * TX completion rings.  This function loops through
3373  * the TX completion rings and assigns the vector that
3374  * will service it.  An example would be if there are
3375  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3376  * This would mean that vector 0 would service RSS ring 0
3377  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3378  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3379  */
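/* In terms of the rx_ring[] array: the TX completion rings occupy indices
 * rss_ring_count..rx_ring_count-1, so in the example above vector 0 covers
 * rx_ring[2..5] and vector 1 covers rx_ring[6..9].
 */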
3380 static void ql_set_tx_vect(struct ql_adapter *qdev)
3381 {
3382 	int i, j, vect;
3383 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3384 
3385 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3386 		/* Assign irq vectors to the TX completion rx_rings. */
3387 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3388 					 i < qdev->rx_ring_count; i++) {
3389 			if (j == tx_rings_per_vector) {
3390 				vect++;
3391 				j = 0;
3392 			}
3393 			qdev->rx_ring[i].irq = vect;
3394 			j++;
3395 		}
3396 	} else {
3397 		/* For single vector all rings have an irq
3398 		 * of zero.
3399 		 */
3400 		for (i = 0; i < qdev->rx_ring_count; i++)
3401 			qdev->rx_ring[i].irq = 0;
3402 	}
3403 }
3404 
3405 /* Set the interrupt mask for this vector.  Each vector
3406  * will service 1 RSS ring and 1 or more TX completion
3407  * rings.  This function sets up a bit mask per vector
3408  * that indicates which rings it services.
3409  */
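/* For instance, with 2 vectors and 8 TX completion rings, vector 0's mask
 * ends up with the bit for cq_id 0 (its RSS ring) plus cq_ids 2-5 (its
 * TX completion rings), since cq_id equals the rx_ring[] index.
 */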
3410 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3411 {
3412 	int j, vect = ctx->intr;
3413 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3414 
3415 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3416 		/* Add the RSS ring serviced by this vector
3417 		 * to the mask.
3418 		 */
3419 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3420 		/* Add the TX ring(s) serviced by this vector
3421 		 * to the mask. */
3422 		for (j = 0; j < tx_rings_per_vector; j++) {
3423 			ctx->irq_mask |=
3424 			(1 << qdev->rx_ring[qdev->rss_ring_count +
3425 			(vect * tx_rings_per_vector) + j].cq_id);
3426 		}
3427 	} else {
3428 		/* For a single vector we just set the bit for
3429 		 * each queue's ID in the mask.
3430 		 */
3431 		for (j = 0; j < qdev->rx_ring_count; j++)
3432 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3433 	}
3434 }
3435 
3436 /*
3437  * Here we build the intr_context structures based on
3438  * our rx_ring count and intr vector count.
3439  * The intr_context structure is used to hook each vector
3440  * to possibly different handlers.
3441  */
3442 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3443 {
3444 	int i = 0;
3445 	struct intr_context *intr_context = &qdev->intr_context[0];
3446 
3447 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3448 		/* Each rx_ring has its
3449 		 * own intr_context since we have separate
3450 		 * vectors for each queue.
3451 		 */
3452 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3453 			qdev->rx_ring[i].irq = i;
3454 			intr_context->intr = i;
3455 			intr_context->qdev = qdev;
3456 			/* Set up this vector's bit-mask that indicates
3457 			 * which queues it services.
3458 			 */
3459 			ql_set_irq_mask(qdev, intr_context);
3460 			/*
3461 			 * We set up each vector's enable/disable/read bits so
3462 			 * there are no bit/mask calculations in the critical path.
3463 			 */
3464 			intr_context->intr_en_mask =
3465 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3466 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3467 			    | i;
3468 			intr_context->intr_dis_mask =
3469 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3470 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3471 			    INTR_EN_IHD | i;
3472 			intr_context->intr_read_mask =
3473 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3474 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3475 			    i;
3476 			if (i == 0) {
3477 				/* The first vector/queue handles
3478 				 * broadcast/multicast, fatal errors,
3479 				 * and firmware events.  This is in addition
3480 				 * to normal inbound NAPI processing.
3481 				 */
3482 				intr_context->handler = qlge_isr;
3483 				sprintf(intr_context->name, "%s-rx-%d",
3484 					qdev->ndev->name, i);
3485 			} else {
3486 				/*
3487 				 * Inbound queues handle unicast frames only.
3488 				 */
3489 				intr_context->handler = qlge_msix_rx_isr;
3490 				sprintf(intr_context->name, "%s-rx-%d",
3491 					qdev->ndev->name, i);
3492 			}
3493 		}
3494 	} else {
3495 		/*
3496 		 * All rx_rings use the same intr_context since
3497 		 * there is only one vector.
3498 		 */
3499 		intr_context->intr = 0;
3500 		intr_context->qdev = qdev;
3501 		/*
3502 		 * We set up each vector's enable/disable/read bits so
3503 		 * there are no bit/mask calculations in the critical path.
3504 		 */
3505 		intr_context->intr_en_mask =
3506 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3507 		intr_context->intr_dis_mask =
3508 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3509 		    INTR_EN_TYPE_DISABLE;
3510 		intr_context->intr_read_mask =
3511 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3512 		/*
3513 		 * Single interrupt means one handler for all rings.
3514 		 */
3515 		intr_context->handler = qlge_isr;
3516 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3517 		/* Set up this vector's bit-mask that indicates
3518 		 * which queues it services. In this case there is
3519 		 * a single vector so it will service all RSS and
3520 		 * TX completion rings.
3521 		 */
3522 		ql_set_irq_mask(qdev, intr_context);
3523 	}
3524 	/* Tell the TX completion rings which MSIx vector
3525 	 * they will be using.
3526 	 */
3527 	ql_set_tx_vect(qdev);
3528 }
3529 
3530 static void ql_free_irq(struct ql_adapter *qdev)
3531 {
3532 	int i;
3533 	struct intr_context *intr_context = &qdev->intr_context[0];
3534 
3535 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3536 		if (intr_context->hooked) {
3537 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3538 				free_irq(qdev->msi_x_entry[i].vector,
3539 					 &qdev->rx_ring[i]);
3540 			} else {
3541 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3542 			}
3543 		}
3544 	}
3545 	ql_disable_msix(qdev);
3546 }
3547 
3548 static int ql_request_irq(struct ql_adapter *qdev)
3549 {
3550 	int i;
3551 	int status = 0;
3552 	struct pci_dev *pdev = qdev->pdev;
3553 	struct intr_context *intr_context = &qdev->intr_context[0];
3554 
3555 	ql_resolve_queues_to_irqs(qdev);
3556 
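	/* With MSI-X each vector gets its own rx_ring as the dev_id;
	 * in MSI/legacy mode everything is hooked to rx_ring[0].
	 */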
3557 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3558 		atomic_set(&intr_context->irq_cnt, 0);
3559 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3560 			status = request_irq(qdev->msi_x_entry[i].vector,
3561 					     intr_context->handler,
3562 					     0,
3563 					     intr_context->name,
3564 					     &qdev->rx_ring[i]);
3565 			if (status) {
3566 				netif_err(qdev, ifup, qdev->ndev,
3567 					  "Failed request for MSIX interrupt %d.\n",
3568 					  i);
3569 				goto err_irq;
3570 			}
3571 		} else {
3572 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3573 				     "trying msi or legacy interrupts.\n");
3574 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3575 				     "%s: irq = %d.\n", __func__, pdev->irq);
3576 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3577 				     "%s: context->name = %s.\n", __func__,
3578 				     intr_context->name);
3579 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3580 				     "%s: dev_id = 0x%p.\n", __func__,
3581 				     &qdev->rx_ring[0]);
3582 			status =
3583 			    request_irq(pdev->irq, qlge_isr,
3584 					test_bit(QL_MSI_ENABLED,
3585 						 &qdev->
3586 						 flags) ? 0 : IRQF_SHARED,
3587 					intr_context->name, &qdev->rx_ring[0]);
3588 			if (status)
3589 				goto err_irq;
3590 
3591 			netif_err(qdev, ifup, qdev->ndev,
3592 				  "Hooked intr %d, queue type %s, with name %s.\n",
3593 				  i,
3594 				  qdev->rx_ring[0].type == DEFAULT_Q ?
3595 				  "DEFAULT_Q" :
3596 				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3597 				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3598 				  intr_context->name);
3599 		}
3600 		intr_context->hooked = 1;
3601 	}
3602 	return status;
3603 err_irq:
3604 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3605 	ql_free_irq(qdev);
3606 	return status;
3607 }
3608 
3609 static int ql_start_rss(struct ql_adapter *qdev)
3610 {
3611 	static const u8 init_hash_seed[] = {
3612 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3613 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3614 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3615 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3616 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3617 	};
3618 	struct ricb *ricb = &qdev->ricb;
3619 	int status = 0;
3620 	int i;
3621 	u8 *hash_id = (u8 *) ricb->hash_cq_id;
3622 
3623 	memset((void *)ricb, 0, sizeof(*ricb));
3624 
3625 	ricb->base_cq = RSS_L4K;
3626 	ricb->flags =
3627 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3628 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3629 
3630 	/*
3631 	 * Fill out the Indirection Table.
3632 	 */
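	/* Each of the 1024 entries maps a hash result to an inbound
	 * completion queue.  Note that '& (rss_ring_count - 1)' only acts
	 * as a modulo when rss_ring_count is a power of two.
	 */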
3633 	for (i = 0; i < 1024; i++)
3634 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3635 
3636 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3637 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3638 
3639 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3640 	if (status) {
3641 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3642 		return status;
3643 	}
3644 	return status;
3645 }
3646 
3647 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3648 {
3649 	int i, status = 0;
3650 
3651 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3652 	if (status)
3653 		return status;
3654 	/* Clear all the entries in the routing table. */
3655 	for (i = 0; i < 16; i++) {
3656 		status = ql_set_routing_reg(qdev, i, 0, 0);
3657 		if (status) {
3658 			netif_err(qdev, ifup, qdev->ndev,
3659 				  "Failed to init routing register for CAM packets.\n");
3660 			break;
3661 		}
3662 	}
3663 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3664 	return status;
3665 }
3666 
3667 /* Initialize the frame-to-queue routing. */
3668 static int ql_route_initialize(struct ql_adapter *qdev)
3669 {
3670 	int status = 0;
3671 
3672 	/* Clear all the entries in the routing table. */
3673 	status = ql_clear_routing_entries(qdev);
3674 	if (status)
3675 		return status;
3676 
3677 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3678 	if (status)
3679 		return status;
3680 
3681 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3682 						RT_IDX_IP_CSUM_ERR, 1);
3683 	if (status) {
3684 		netif_err(qdev, ifup, qdev->ndev,
3685 			"Failed to init routing register "
3686 			"for IP CSUM error packets.\n");
3687 		goto exit;
3688 	}
3689 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3690 						RT_IDX_TU_CSUM_ERR, 1);
3691 	if (status) {
3692 		netif_err(qdev, ifup, qdev->ndev,
3693 			"Failed to init routing register "
3694 			"for TCP/UDP CSUM error packets.\n");
3695 		goto exit;
3696 	}
3697 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3698 	if (status) {
3699 		netif_err(qdev, ifup, qdev->ndev,
3700 			  "Failed to init routing register for broadcast packets.\n");
3701 		goto exit;
3702 	}
3703 	/* If we have more than one inbound queue, then turn on RSS in the
3704 	 * routing block.
3705 	 */
3706 	if (qdev->rss_ring_count > 1) {
3707 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3708 					RT_IDX_RSS_MATCH, 1);
3709 		if (status) {
3710 			netif_err(qdev, ifup, qdev->ndev,
3711 				  "Failed to init routing register for MATCH RSS packets.\n");
3712 			goto exit;
3713 		}
3714 	}
3715 
3716 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3717 				    RT_IDX_CAM_HIT, 1);
3718 	if (status)
3719 		netif_err(qdev, ifup, qdev->ndev,
3720 			  "Failed to init routing register for CAM packets.\n");
3721 exit:
3722 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3723 	return status;
3724 }
3725 
3726 int ql_cam_route_initialize(struct ql_adapter *qdev)
3727 {
3728 	int status, set;
3729 
3730 	/* Check if the link is up, and use that to
3731 	 * determine whether we are setting or clearing
3732 	 * the MAC address in the CAM.
3733 	 */
3734 	set = ql_read32(qdev, STS);
3735 	set &= qdev->port_link_up;
3736 	status = ql_set_mac_addr(qdev, set);
3737 	if (status) {
3738 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3739 		return status;
3740 	}
3741 
3742 	status = ql_route_initialize(qdev);
3743 	if (status)
3744 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3745 
3746 	return status;
3747 }
3748 
3749 static int ql_adapter_initialize(struct ql_adapter *qdev)
3750 {
3751 	u32 value, mask;
3752 	int i;
3753 	int status = 0;
3754 
3755 	/*
3756 	 * Set up the System register to halt on errors.
3757 	 */
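	/* These control registers appear to take a write mask in their upper
	 * 16 bits; only the masked lower control bits are actually updated.
	 */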
3758 	value = SYS_EFE | SYS_FAE;
3759 	mask = value << 16;
3760 	ql_write32(qdev, SYS, mask | value);
3761 
3762 	/* Set the default queue, and VLAN behavior. */
3763 	value = NIC_RCV_CFG_DFQ;
3764 	mask = NIC_RCV_CFG_DFQ_MASK;
3765 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3766 		value |= NIC_RCV_CFG_RV;
3767 		mask |= (NIC_RCV_CFG_RV << 16);
3768 	}
3769 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3770 
3771 	/* Set the MPI interrupt to enabled. */
3772 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3773 
3774 	/* Enable the function, set pagesize, enable error checking. */
3775 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3776 	    FSC_EC | FSC_VM_PAGE_4K;
3777 	value |= SPLT_SETTING;
3778 
3779 	/* Set/clear header splitting. */
3780 	mask = FSC_VM_PAGESIZE_MASK |
3781 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3782 	ql_write32(qdev, FSC, mask | value);
3783 
3784 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3785 
3786 	/* Set RX packet routing to use the port/pci function on which
3787 	 * the packet arrived, in addition to the usual frame routing.
3788 	 * This is helpful on bonding where both interfaces can have
3789 	 * the same MAC address.
3790 	 */
3791 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3792 	/* Reroute all packets to our Interface.
3793 	 * They may have been routed to MPI firmware
3794 	 * due to WOL.
3795 	 */
3796 	value = ql_read32(qdev, MGMT_RCV_CFG);
3797 	value &= ~MGMT_RCV_CFG_RM;
3798 	mask = 0xffff0000;
3799 
3800 	/* Sticky reg needs clearing due to WOL. */
3801 	ql_write32(qdev, MGMT_RCV_CFG, mask);
3802 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3803 
3804 	/* Default WOL is enabled on Mezz cards */
3805 	if (qdev->pdev->subsystem_device == 0x0068 ||
3806 			qdev->pdev->subsystem_device == 0x0180)
3807 		qdev->wol = WAKE_MAGIC;
3808 
3809 	/* Start up the rx queues. */
3810 	for (i = 0; i < qdev->rx_ring_count; i++) {
3811 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3812 		if (status) {
3813 			netif_err(qdev, ifup, qdev->ndev,
3814 				  "Failed to start rx ring[%d].\n", i);
3815 			return status;
3816 		}
3817 	}
3818 
3819 	/* If there is more than one inbound completion queue
3820 	 * then download a RICB to configure RSS.
3821 	 */
3822 	if (qdev->rss_ring_count > 1) {
3823 		status = ql_start_rss(qdev);
3824 		if (status) {
3825 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3826 			return status;
3827 		}
3828 	}
3829 
3830 	/* Start up the tx queues. */
3831 	for (i = 0; i < qdev->tx_ring_count; i++) {
3832 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3833 		if (status) {
3834 			netif_err(qdev, ifup, qdev->ndev,
3835 				  "Failed to start tx ring[%d].\n", i);
3836 			return status;
3837 		}
3838 	}
3839 
3840 	/* Initialize the port and set the max framesize. */
3841 	status = qdev->nic_ops->port_initialize(qdev);
3842 	if (status)
3843 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3844 
3845 	/* Set up the MAC address and frame routing filter. */
3846 	status = ql_cam_route_initialize(qdev);
3847 	if (status) {
3848 		netif_err(qdev, ifup, qdev->ndev,
3849 			  "Failed to init CAM/Routing tables.\n");
3850 		return status;
3851 	}
3852 
3853 	/* Start NAPI for the RSS queues. */
3854 	for (i = 0; i < qdev->rss_ring_count; i++)
3855 		napi_enable(&qdev->rx_ring[i].napi);
3856 
3857 	return status;
3858 }
3859 
3860 /* Issue soft reset to chip. */
3861 static int ql_adapter_reset(struct ql_adapter *qdev)
3862 {
3863 	u32 value;
3864 	int status = 0;
3865 	unsigned long end_jiffies;
3866 
3867 	/* Clear all the entries in the routing table. */
3868 	status = ql_clear_routing_entries(qdev);
3869 	if (status) {
3870 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3871 		return status;
3872 	}
3873 
3874 	end_jiffies = jiffies +
3875 		max((unsigned long)1, usecs_to_jiffies(30));
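	/* This gives the function-reset bit below at least one jiffy
	 * to clear before we give up.
	 */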
3876 
3877 	/* If the recovery bit is set, skip the mailbox command and
3878 	 * clear the bit; otherwise this is a normal reset.
3879 	 */
3880 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3881 		/* Stop management traffic. */
3882 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3883 
3884 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3885 		ql_wait_fifo_empty(qdev);
3886 	} else
3887 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3888 
3889 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3890 
3891 	do {
3892 		value = ql_read32(qdev, RST_FO);
3893 		if ((value & RST_FO_FR) == 0)
3894 			break;
3895 		cpu_relax();
3896 	} while (time_before(jiffies, end_jiffies));
3897 
3898 	if (value & RST_FO_FR) {
3899 		netif_err(qdev, ifdown, qdev->ndev,
3900 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3901 		status = -ETIMEDOUT;
3902 	}
3903 
3904 	/* Resume management traffic. */
3905 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3906 	return status;
3907 }
3908 
3909 static void ql_display_dev_info(struct net_device *ndev)
3910 {
3911 	struct ql_adapter *qdev = netdev_priv(ndev);
3912 
3913 	netif_info(qdev, probe, qdev->ndev,
3914 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3915 		   "XG Roll = %d, XG Rev = %d.\n",
3916 		   qdev->func,
3917 		   qdev->port,
3918 		   qdev->chip_rev_id & 0x0000000f,
3919 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3920 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3921 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3922 	netif_info(qdev, probe, qdev->ndev,
3923 		   "MAC address %pM\n", ndev->dev_addr);
3924 }
3925 
3926 static int ql_wol(struct ql_adapter *qdev)
3927 {
3928 	int status = 0;
3929 	u32 wol = MB_WOL_DISABLE;
3930 
3931 	/* The CAM is still intact after a reset, but if we
3932 	 * are doing WOL, then we may need to program the
3933 	 * routing regs. We would also need to issue the mailbox
3934 	 * commands to instruct the MPI what to do per the ethtool
3935 	 * settings.
3936 	 */
3937 
3938 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3939 			WAKE_MCAST | WAKE_BCAST)) {
3940 		netif_err(qdev, ifdown, qdev->ndev,
3941 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3942 			  qdev->wol);
3943 		return -EINVAL;
3944 	}
3945 
3946 	if (qdev->wol & WAKE_MAGIC) {
3947 		status = ql_mb_wol_set_magic(qdev, 1);
3948 		if (status) {
3949 			netif_err(qdev, ifdown, qdev->ndev,
3950 				  "Failed to set magic packet on %s.\n",
3951 				  qdev->ndev->name);
3952 			return status;
3953 		}
3954 		netif_info(qdev, drv, qdev->ndev,
3955 			   "Enabled magic packet successfully on %s.\n",
3956 			   qdev->ndev->name);
3957 
3958 		wol |= MB_WOL_MAGIC_PKT;
3959 	}
3960 
3961 	if (qdev->wol) {
3962 		wol |= MB_WOL_MODE_ON;
3963 		status = ql_mb_wol_mode(qdev, wol);
3964 		netif_err(qdev, drv, qdev->ndev,
3965 			  "WOL %s (wol code 0x%x) on %s\n",
3966 			  (status == 0) ? "Successfully set" : "Failed",
3967 			  wol, qdev->ndev->name);
3968 	}
3969 
3970 	return status;
3971 }
3972 
3973 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3974 {
3975 
3976 	/* Don't kill the reset worker thread if we
3977 	 * are in the process of recovery.
3978 	 */
3979 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3980 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3981 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3982 	cancel_delayed_work_sync(&qdev->mpi_work);
3983 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3984 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3985 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3986 }
3987 
3988 static int ql_adapter_down(struct ql_adapter *qdev)
3989 {
3990 	int i, status = 0;
3991 
3992 	ql_link_off(qdev);
3993 
3994 	ql_cancel_all_work_sync(qdev);
3995 
3996 	for (i = 0; i < qdev->rss_ring_count; i++)
3997 		napi_disable(&qdev->rx_ring[i].napi);
3998 
3999 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4000 
4001 	ql_disable_interrupts(qdev);
4002 
4003 	ql_tx_ring_clean(qdev);
4004 
4005 	/* Call netif_napi_del() from common point.
4006 	 */
4007 	for (i = 0; i < qdev->rss_ring_count; i++)
4008 		netif_napi_del(&qdev->rx_ring[i].napi);
4009 
4010 	status = ql_adapter_reset(qdev);
4011 	if (status)
4012 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4013 			  qdev->func);
4014 	ql_free_rx_buffers(qdev);
4015 
4016 	return status;
4017 }
4018 
4019 static int ql_adapter_up(struct ql_adapter *qdev)
4020 {
4021 	int err = 0;
4022 
4023 	err = ql_adapter_initialize(qdev);
4024 	if (err) {
4025 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4026 		goto err_init;
4027 	}
4028 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4029 	ql_alloc_rx_buffers(qdev);
4030 	/* If the port is initialized and the
4031 	 * link is up, then turn on the carrier.
4032 	 */
4033 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
4034 			(ql_read32(qdev, STS) & qdev->port_link_up))
4035 		ql_link_on(qdev);
4036 	/* Restore rx mode. */
4037 	clear_bit(QL_ALLMULTI, &qdev->flags);
4038 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4039 	qlge_set_multicast_list(qdev->ndev);
4040 
4041 	/* Restore vlan setting. */
4042 	qlge_restore_vlan(qdev);
4043 
4044 	ql_enable_interrupts(qdev);
4045 	ql_enable_all_completion_interrupts(qdev);
4046 	netif_tx_start_all_queues(qdev->ndev);
4047 
4048 	return 0;
4049 err_init:
4050 	ql_adapter_reset(qdev);
4051 	return err;
4052 }
4053 
4054 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4055 {
4056 	ql_free_mem_resources(qdev);
4057 	ql_free_irq(qdev);
4058 }
4059 
4060 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4061 {
4062 	int status = 0;
4063 
4064 	if (ql_alloc_mem_resources(qdev)) {
4065 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4066 		return -ENOMEM;
4067 	}
4068 	status = ql_request_irq(qdev);
4069 	return status;
4070 }
4071 
4072 static int qlge_close(struct net_device *ndev)
4073 {
4074 	struct ql_adapter *qdev = netdev_priv(ndev);
4075 
4076 	/* If we hit the pci_channel_io_perm_failure
4077 	 * condition, then we have already
4078 	 * brought the adapter down.
4079 	 */
4080 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4081 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4082 		clear_bit(QL_EEH_FATAL, &qdev->flags);
4083 		return 0;
4084 	}
4085 
4086 	/*
4087 	 * Wait for device to recover from a reset.
4088 	 * (Rarely happens, but possible.)
4089 	 */
4090 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4091 		msleep(1);
4092 	ql_adapter_down(qdev);
4093 	ql_release_adapter_resources(qdev);
4094 	return 0;
4095 }
4096 
4097 static int ql_configure_rings(struct ql_adapter *qdev)
4098 {
4099 	int i;
4100 	struct rx_ring *rx_ring;
4101 	struct tx_ring *tx_ring;
4102 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4103 	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4104 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4105 
4106 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4107 
4108 	/* In a perfect world we have one RSS ring for each CPU
4109 	 * and each has its own vector.  To do that we ask for
4110 	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
4111 	 * vector count to what we actually get.  We then
4112 	 * allocate an RSS ring for each.
4113 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4114 	 */
4115 	qdev->intr_count = cpu_cnt;
4116 	ql_enable_msix(qdev);
4117 	/* Adjust the RSS ring count to the actual vector count. */
4118 	qdev->rss_ring_count = qdev->intr_count;
4119 	qdev->tx_ring_count = cpu_cnt;
4120 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4121 
4122 	for (i = 0; i < qdev->tx_ring_count; i++) {
4123 		tx_ring = &qdev->tx_ring[i];
4124 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
4125 		tx_ring->qdev = qdev;
4126 		tx_ring->wq_id = i;
4127 		tx_ring->wq_len = qdev->tx_ring_size;
4128 		tx_ring->wq_size =
4129 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4130 
4131 		/*
4132 		 * The completion queue IDs for the tx rings start
4133 		 * immediately after the rss rings.
4134 		 */
4135 		tx_ring->cq_id = qdev->rss_ring_count + i;
4136 	}
4137 
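	/* rx_ring[] layout: indices 0..rss_ring_count-1 are the inbound
	 * (RSS) rings, the remainder are TX completion rings; cq_id is
	 * simply the array index.
	 */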
4138 	for (i = 0; i < qdev->rx_ring_count; i++) {
4139 		rx_ring = &qdev->rx_ring[i];
4140 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
4141 		rx_ring->qdev = qdev;
4142 		rx_ring->cq_id = i;
4143 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
4144 		if (i < qdev->rss_ring_count) {
4145 			/*
4146 			 * Inbound (RSS) queues.
4147 			 */
4148 			rx_ring->cq_len = qdev->rx_ring_size;
4149 			rx_ring->cq_size =
4150 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4151 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4152 			rx_ring->lbq_size =
4153 			    rx_ring->lbq_len * sizeof(__le64);
4154 			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4155 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4156 			rx_ring->sbq_size =
4157 			    rx_ring->sbq_len * sizeof(__le64);
4158 			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4159 			rx_ring->type = RX_Q;
4160 		} else {
4161 			/*
4162 			 * Outbound queue handles outbound completions only.
4163 			 */
4164 			/* outbound cq is same size as tx_ring it services. */
4165 			rx_ring->cq_len = qdev->tx_ring_size;
4166 			rx_ring->cq_size =
4167 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4168 			rx_ring->lbq_len = 0;
4169 			rx_ring->lbq_size = 0;
4170 			rx_ring->lbq_buf_size = 0;
4171 			rx_ring->sbq_len = 0;
4172 			rx_ring->sbq_size = 0;
4173 			rx_ring->sbq_buf_size = 0;
4174 			rx_ring->type = TX_Q;
4175 		}
4176 	}
4177 	return 0;
4178 }
4179 
4180 static int qlge_open(struct net_device *ndev)
4181 {
4182 	int err = 0;
4183 	struct ql_adapter *qdev = netdev_priv(ndev);
4184 
4185 	err = ql_adapter_reset(qdev);
4186 	if (err)
4187 		return err;
4188 
4189 	err = ql_configure_rings(qdev);
4190 	if (err)
4191 		return err;
4192 
4193 	err = ql_get_adapter_resources(qdev);
4194 	if (err)
4195 		goto error_up;
4196 
4197 	err = ql_adapter_up(qdev);
4198 	if (err)
4199 		goto error_up;
4200 
4201 	return err;
4202 
4203 error_up:
4204 	ql_release_adapter_resources(qdev);
4205 	return err;
4206 }
4207 
4208 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4209 {
4210 	struct rx_ring *rx_ring;
4211 	int i, status;
4212 	u32 lbq_buf_len;
4213 
4214 	/* Wait for an outstanding reset to complete. */
4215 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4216 		int i = 4;
4217 		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4218 			netif_err(qdev, ifup, qdev->ndev,
4219 				  "Waiting for adapter UP...\n");
4220 			ssleep(1);
4221 		}
4222 
4223 		if (!i) {
4224 			netif_err(qdev, ifup, qdev->ndev,
4225 				  "Timed out waiting for adapter UP\n");
4226 			return -ETIMEDOUT;
4227 		}
4228 	}
4229 
4230 	status = ql_adapter_down(qdev);
4231 	if (status)
4232 		goto error;
4233 
4234 	/* Get the new rx buffer size. */
4235 	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4236 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4237 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4238 
4239 	for (i = 0; i < qdev->rss_ring_count; i++) {
4240 		rx_ring = &qdev->rx_ring[i];
4241 		/* Set the new size. */
4242 		rx_ring->lbq_buf_size = lbq_buf_len;
4243 	}
4244 
4245 	status = ql_adapter_up(qdev);
4246 	if (status)
4247 		goto error;
4248 
4249 	return status;
4250 error:
4251 	netif_alert(qdev, ifup, qdev->ndev,
4252 		    "Driver up/down cycle failed, closing device.\n");
4253 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4254 	dev_close(qdev->ndev);
4255 	return status;
4256 }
4257 
4258 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4259 {
4260 	struct ql_adapter *qdev = netdev_priv(ndev);
4261 	int status;
4262 
4263 	if (ndev->mtu == 1500 && new_mtu == 9000) {
4264 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4265 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
4266 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4267 	} else
4268 		return -EINVAL;
4269 
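	/* Kick the port-config worker so the MPI firmware picks up the
	 * new max frame size after a short settling delay.
	 */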
4270 	queue_delayed_work(qdev->workqueue,
4271 			&qdev->mpi_port_cfg_work, 3*HZ);
4272 
4273 	ndev->mtu = new_mtu;
4274 
4275 	if (!netif_running(qdev->ndev)) {
4276 		return 0;
4277 	}
4278 
4279 	status = ql_change_rx_buffers(qdev);
4280 	if (status) {
4281 		netif_err(qdev, ifup, qdev->ndev,
4282 			  "Changing MTU failed.\n");
4283 	}
4284 
4285 	return status;
4286 }
4287 
4288 static struct net_device_stats *qlge_get_stats(struct net_device
4289 					       *ndev)
4290 {
4291 	struct ql_adapter *qdev = netdev_priv(ndev);
4292 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4293 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4294 	unsigned long pkts, mcast, dropped, errors, bytes;
4295 	int i;
4296 
4297 	/* Get RX stats. */
4298 	pkts = mcast = dropped = errors = bytes = 0;
4299 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4300 		pkts += rx_ring->rx_packets;
4301 		bytes += rx_ring->rx_bytes;
4302 		dropped += rx_ring->rx_dropped;
4303 		errors += rx_ring->rx_errors;
4304 		mcast += rx_ring->rx_multicast;
4305 	}
4306 	ndev->stats.rx_packets = pkts;
4307 	ndev->stats.rx_bytes = bytes;
4308 	ndev->stats.rx_dropped = dropped;
4309 	ndev->stats.rx_errors = errors;
4310 	ndev->stats.multicast = mcast;
4311 
4312 	/* Get TX stats. */
4313 	pkts = errors = bytes = 0;
4314 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4315 		pkts += tx_ring->tx_packets;
4316 		bytes += tx_ring->tx_bytes;
4317 		errors += tx_ring->tx_errors;
4318 	}
4319 	ndev->stats.tx_packets = pkts;
4320 	ndev->stats.tx_bytes = bytes;
4321 	ndev->stats.tx_errors = errors;
4322 	return &ndev->stats;
4323 }
4324 
4325 static void qlge_set_multicast_list(struct net_device *ndev)
4326 {
4327 	struct ql_adapter *qdev = netdev_priv(ndev);
4328 	struct netdev_hw_addr *ha;
4329 	int i, status;
4330 
4331 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4332 	if (status)
4333 		return;
4334 	/*
4335 	 * Set or clear promiscuous mode if a
4336 	 * transition is taking place.
4337 	 */
4338 	if (ndev->flags & IFF_PROMISC) {
4339 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4340 			if (ql_set_routing_reg
4341 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4342 				netif_err(qdev, hw, qdev->ndev,
4343 					  "Failed to set promiscuous mode.\n");
4344 			} else {
4345 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4346 			}
4347 		}
4348 	} else {
4349 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4350 			if (ql_set_routing_reg
4351 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4352 				netif_err(qdev, hw, qdev->ndev,
4353 					  "Failed to clear promiscuous mode.\n");
4354 			} else {
4355 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4356 			}
4357 		}
4358 	}
4359 
4360 	/*
4361 	 * Set or clear all multicast mode if a
4362 	 * transition is taking place.
4363 	 */
4364 	if ((ndev->flags & IFF_ALLMULTI) ||
4365 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4366 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4367 			if (ql_set_routing_reg
4368 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4369 				netif_err(qdev, hw, qdev->ndev,
4370 					  "Failed to set all-multi mode.\n");
4371 			} else {
4372 				set_bit(QL_ALLMULTI, &qdev->flags);
4373 			}
4374 		}
4375 	} else {
4376 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4377 			if (ql_set_routing_reg
4378 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4379 				netif_err(qdev, hw, qdev->ndev,
4380 					  "Failed to clear all-multi mode.\n");
4381 			} else {
4382 				clear_bit(QL_ALLMULTI, &qdev->flags);
4383 			}
4384 		}
4385 	}
4386 
4387 	if (!netdev_mc_empty(ndev)) {
4388 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4389 		if (status)
4390 			goto exit;
4391 		i = 0;
4392 		netdev_for_each_mc_addr(ha, ndev) {
4393 			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4394 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4395 				netif_err(qdev, hw, qdev->ndev,
4396 					  "Failed to load multicast address.\n");
4397 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4398 				goto exit;
4399 			}
4400 			i++;
4401 		}
4402 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4403 		if (ql_set_routing_reg
4404 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4405 			netif_err(qdev, hw, qdev->ndev,
4406 				  "Failed to set multicast match mode.\n");
4407 		} else {
4408 			set_bit(QL_ALLMULTI, &qdev->flags);
4409 		}
4410 	}
4411 exit:
4412 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4413 }
4414 
4415 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4416 {
4417 	struct ql_adapter *qdev = netdev_priv(ndev);
4418 	struct sockaddr *addr = p;
4419 	int status;
4420 
4421 	if (!is_valid_ether_addr(addr->sa_data))
4422 		return -EADDRNOTAVAIL;
4423 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4424 	/* Update local copy of current mac address. */
4425 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4426 
4427 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4428 	if (status)
4429 		return status;
4430 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4431 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4432 	if (status)
4433 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4434 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4435 	return status;
4436 }
4437 
4438 static void qlge_tx_timeout(struct net_device *ndev)
4439 {
4440 	struct ql_adapter *qdev = netdev_priv(ndev);
4441 	ql_queue_asic_error(qdev);
4442 }
4443 
4444 static void ql_asic_reset_work(struct work_struct *work)
4445 {
4446 	struct ql_adapter *qdev =
4447 	    container_of(work, struct ql_adapter, asic_reset_work.work);
4448 	int status;
4449 	rtnl_lock();
4450 	status = ql_adapter_down(qdev);
4451 	if (status)
4452 		goto error;
4453 
4454 	status = ql_adapter_up(qdev);
4455 	if (status)
4456 		goto error;
4457 
4458 	/* Restore rx mode. */
4459 	clear_bit(QL_ALLMULTI, &qdev->flags);
4460 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4461 	qlge_set_multicast_list(qdev->ndev);
4462 
4463 	rtnl_unlock();
4464 	return;
4465 error:
4466 	netif_alert(qdev, ifup, qdev->ndev,
4467 		    "Driver up/down cycle failed, closing device\n");
4468 
4469 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4470 	dev_close(qdev->ndev);
4471 	rtnl_unlock();
4472 }
4473 
4474 static const struct nic_operations qla8012_nic_ops = {
4475 	.get_flash		= ql_get_8012_flash_params,
4476 	.port_initialize	= ql_8012_port_initialize,
4477 };
4478 
4479 static const struct nic_operations qla8000_nic_ops = {
4480 	.get_flash		= ql_get_8000_flash_params,
4481 	.port_initialize	= ql_8000_port_initialize,
4482 };
4483 
4484 /* Find the pcie function number for the other NIC
4485  * on this chip.  Since both NIC functions share a
4486  * common firmware we have the lowest enabled function
4487  * do any common work.  Examples would be resetting
4488  * after a fatal firmware error, or doing a firmware
4489  * coredump.
4490  */
4491 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4492 {
4493 	int status = 0;
4494 	u32 temp;
4495 	u32 nic_func1, nic_func2;
4496 
4497 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4498 			&temp);
4499 	if (status)
4500 		return status;
4501 
4502 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4503 			MPI_TEST_NIC_FUNC_MASK);
4504 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4505 			MPI_TEST_NIC_FUNC_MASK);
4506 
4507 	if (qdev->func == nic_func1)
4508 		qdev->alt_func = nic_func2;
4509 	else if (qdev->func == nic_func2)
4510 		qdev->alt_func = nic_func1;
4511 	else
4512 		status = -EIO;
4513 
4514 	return status;
4515 }
4516 
4517 static int ql_get_board_info(struct ql_adapter *qdev)
4518 {
4519 	int status;
4520 	qdev->func =
4521 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4522 	if (qdev->func > 3)
4523 		return -EIO;
4524 
4525 	status = ql_get_alt_pcie_func(qdev);
4526 	if (status)
4527 		return status;
4528 
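	/* The NIC function with the lower PCIe function number owns
	 * port 0; its sibling owns port 1.
	 */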
4529 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4530 	if (qdev->port) {
4531 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4532 		qdev->port_link_up = STS_PL1;
4533 		qdev->port_init = STS_PI1;
4534 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4535 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4536 	} else {
4537 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4538 		qdev->port_link_up = STS_PL0;
4539 		qdev->port_init = STS_PI0;
4540 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4541 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4542 	}
4543 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4544 	qdev->device_id = qdev->pdev->device;
4545 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4546 		qdev->nic_ops = &qla8012_nic_ops;
4547 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4548 		qdev->nic_ops = &qla8000_nic_ops;
4549 	return status;
4550 }
4551 
4552 static void ql_release_all(struct pci_dev *pdev)
4553 {
4554 	struct net_device *ndev = pci_get_drvdata(pdev);
4555 	struct ql_adapter *qdev = netdev_priv(ndev);
4556 
4557 	if (qdev->workqueue) {
4558 		destroy_workqueue(qdev->workqueue);
4559 		qdev->workqueue = NULL;
4560 	}
4561 
4562 	if (qdev->reg_base)
4563 		iounmap(qdev->reg_base);
4564 	if (qdev->doorbell_area)
4565 		iounmap(qdev->doorbell_area);
4566 	vfree(qdev->mpi_coredump);
4567 	pci_release_regions(pdev);
4568 }
4569 
4570 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4571 			  int cards_found)
4572 {
4573 	struct ql_adapter *qdev = netdev_priv(ndev);
4574 	int err = 0;
4575 
4576 	memset((void *)qdev, 0, sizeof(*qdev));
4577 	err = pci_enable_device(pdev);
4578 	if (err) {
4579 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4580 		return err;
4581 	}
4582 
4583 	qdev->ndev = ndev;
4584 	qdev->pdev = pdev;
4585 	pci_set_drvdata(pdev, ndev);
4586 
4587 	/* Set PCIe read request size */
4588 	err = pcie_set_readrq(pdev, 4096);
4589 	if (err) {
4590 		dev_err(&pdev->dev, "Set readrq failed.\n");
4591 		goto err_out1;
4592 	}
4593 
4594 	err = pci_request_regions(pdev, DRV_NAME);
4595 	if (err) {
4596 		dev_err(&pdev->dev, "PCI region request failed.\n");
4597 		goto err_out1;
4598 	}
4599 
4600 	pci_set_master(pdev);
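	/* Prefer a 64-bit DMA mask and fall back to 32-bit.  QL_DMA64 is
	 * remembered so the probe path can advertise NETIF_F_HIGHDMA.
	 */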
4601 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4602 		set_bit(QL_DMA64, &qdev->flags);
4603 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4604 	} else {
4605 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4606 		if (!err)
4607 		       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4608 	}
4609 
4610 	if (err) {
4611 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4612 		goto err_out2;
4613 	}
4614 
4615 	/* Set PCIe reset type for EEH to fundamental. */
4616 	pdev->needs_freset = 1;
4617 	pci_save_state(pdev);
4618 	qdev->reg_base =
4619 	    ioremap_nocache(pci_resource_start(pdev, 1),
4620 			    pci_resource_len(pdev, 1));
4621 	if (!qdev->reg_base) {
4622 		dev_err(&pdev->dev, "Register mapping failed.\n");
4623 		err = -ENOMEM;
4624 		goto err_out2;
4625 	}
4626 
4627 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4628 	qdev->doorbell_area =
4629 	    ioremap_nocache(pci_resource_start(pdev, 3),
4630 			    pci_resource_len(pdev, 3));
4631 	if (!qdev->doorbell_area) {
4632 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4633 		err = -ENOMEM;
4634 		goto err_out2;
4635 	}
4636 
4637 	err = ql_get_board_info(qdev);
4638 	if (err) {
4639 		dev_err(&pdev->dev, "Register access failed.\n");
4640 		err = -EIO;
4641 		goto err_out2;
4642 	}
4643 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4644 	spin_lock_init(&qdev->hw_lock);
4645 	spin_lock_init(&qdev->stats_lock);
4646 
4647 	if (qlge_mpi_coredump) {
4648 		qdev->mpi_coredump =
4649 			vmalloc(sizeof(struct ql_mpi_coredump));
4650 		if (qdev->mpi_coredump == NULL) {
4651 			err = -ENOMEM;
4652 			goto err_out2;
4653 		}
4654 		if (qlge_force_coredump)
4655 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4656 	}
4657 	/* make sure the EEPROM is good */
4658 	err = qdev->nic_ops->get_flash(qdev);
4659 	if (err) {
4660 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4661 		goto err_out2;
4662 	}
4663 
4664 	/* Keep local copy of current mac address. */
4665 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4666 
4667 	/* Set up the default ring sizes. */
4668 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4669 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4670 
4671 	/* Set up the coalescing parameters. */
4672 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4673 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4674 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4675 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4676 
4677 	/*
4678 	 * Set up the operating parameters.
4679 	 */
4680 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_out2;
	}
4681 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4682 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4683 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4684 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4685 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4686 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4687 	init_completion(&qdev->ide_completion);
4688 	mutex_init(&qdev->mpi_mutex);
4689 
4690 	if (!cards_found) {
4691 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4692 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4693 			 DRV_NAME, DRV_VERSION);
4694 	}
4695 	return 0;
4696 err_out2:
4697 	ql_release_all(pdev);
4698 err_out1:
4699 	pci_disable_device(pdev);
4700 	return err;
4701 }
4702 
4703 static const struct net_device_ops qlge_netdev_ops = {
4704 	.ndo_open		= qlge_open,
4705 	.ndo_stop		= qlge_close,
4706 	.ndo_start_xmit		= qlge_send,
4707 	.ndo_change_mtu		= qlge_change_mtu,
4708 	.ndo_get_stats		= qlge_get_stats,
4709 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4710 	.ndo_set_mac_address	= qlge_set_mac_address,
4711 	.ndo_validate_addr	= eth_validate_addr,
4712 	.ndo_tx_timeout		= qlge_tx_timeout,
4713 	.ndo_fix_features	= qlge_fix_features,
4714 	.ndo_set_features	= qlge_set_features,
4715 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4716 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4717 };
4718 
4719 static void ql_timer(unsigned long data)
4720 {
4721 	struct ql_adapter *qdev = (struct ql_adapter *)data;
4722 	u32 var = 0;
4723 
4724 	var = ql_read32(qdev, STS);
4725 	if (pci_channel_offline(qdev->pdev)) {
4726 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4727 		return;
4728 	}
4729 
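	/* Re-arm for another 5 seconds; the periodic STS read above is what
	 * lets a dead PCI bus be noticed so EEH recovery can kick in.
	 */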
4730 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4731 }
4732 
4733 static int qlge_probe(struct pci_dev *pdev,
4734 		      const struct pci_device_id *pci_entry)
4735 {
4736 	struct net_device *ndev = NULL;
4737 	struct ql_adapter *qdev = NULL;
4738 	static int cards_found;
4739 	int err = 0;
4740 
4741 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4742 			min(MAX_CPUS, netif_get_num_default_rss_queues()));
4743 	if (!ndev)
4744 		return -ENOMEM;
4745 
4746 	err = ql_init_device(pdev, ndev, cards_found);
4747 	if (err < 0) {
4748 		free_netdev(ndev);
4749 		return err;
4750 	}
4751 
4752 	qdev = netdev_priv(ndev);
4753 	SET_NETDEV_DEV(ndev, &pdev->dev);
4754 	ndev->hw_features = NETIF_F_SG |
4755 			    NETIF_F_IP_CSUM |
4756 			    NETIF_F_TSO |
4757 			    NETIF_F_TSO_ECN |
4758 			    NETIF_F_HW_VLAN_CTAG_TX |
4759 			    NETIF_F_HW_VLAN_CTAG_RX |
4760 			    NETIF_F_HW_VLAN_CTAG_FILTER |
4761 			    NETIF_F_RXCSUM;
4762 	ndev->features = ndev->hw_features;
4763 	ndev->vlan_features = ndev->hw_features;
4764 	/* vlan gets same features (except vlan filter) */
4765 	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4766 				 NETIF_F_HW_VLAN_CTAG_TX |
4767 				 NETIF_F_HW_VLAN_CTAG_RX);
4768 
4769 	if (test_bit(QL_DMA64, &qdev->flags))
4770 		ndev->features |= NETIF_F_HIGHDMA;
4771 
4772 	/*
4773 	 * Set up net_device structure.
4774 	 */
4775 	ndev->tx_queue_len = qdev->tx_ring_size;
4776 	ndev->irq = pdev->irq;
4777 
4778 	ndev->netdev_ops = &qlge_netdev_ops;
4779 	ndev->ethtool_ops = &qlge_ethtool_ops;
4780 	ndev->watchdog_timeo = 10 * HZ;
4781 
4782 	err = register_netdev(ndev);
4783 	if (err) {
4784 		dev_err(&pdev->dev, "net device registration failed.\n");
4785 		ql_release_all(pdev);
4786 		pci_disable_device(pdev);
4787 		free_netdev(ndev);
4788 		return err;
4789 	}
4790 	/* Start up the timer to trigger EEH if
4791 	 * the bus goes dead
4792 	 */
4793 	init_timer_deferrable(&qdev->timer);
4794 	qdev->timer.data = (unsigned long)qdev;
4795 	qdev->timer.function = ql_timer;
4796 	qdev->timer.expires = jiffies + (5*HZ);
4797 	add_timer(&qdev->timer);
4798 	ql_link_off(qdev);
4799 	ql_display_dev_info(ndev);
4800 	atomic_set(&qdev->lb_count, 0);
4801 	cards_found++;
4802 	return 0;
4803 }
4804 
4805 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4806 {
4807 	return qlge_send(skb, ndev);
4808 }
4809 
4810 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4811 {
4812 	return ql_clean_inbound_rx_ring(rx_ring, budget);
4813 }
4814 
4815 static void qlge_remove(struct pci_dev *pdev)
4816 {
4817 	struct net_device *ndev = pci_get_drvdata(pdev);
4818 	struct ql_adapter *qdev = netdev_priv(ndev);
4819 	del_timer_sync(&qdev->timer);
4820 	ql_cancel_all_work_sync(qdev);
4821 	unregister_netdev(ndev);
4822 	ql_release_all(pdev);
4823 	pci_disable_device(pdev);
4824 	free_netdev(ndev);
4825 }
4826 
4827 /* Clean up resources without touching hardware. */
4828 static void ql_eeh_close(struct net_device *ndev)
4829 {
4830 	int i;
4831 	struct ql_adapter *qdev = netdev_priv(ndev);
4832 
4833 	if (netif_carrier_ok(ndev)) {
4834 		netif_carrier_off(ndev);
4835 		netif_stop_queue(ndev);
4836 	}
4837 
4838 	/* Disabling the timer */
4839 	del_timer_sync(&qdev->timer);
4840 	ql_cancel_all_work_sync(qdev);
4841 
4842 	for (i = 0; i < qdev->rss_ring_count; i++)
4843 		netif_napi_del(&qdev->rx_ring[i].napi);
4844 
4845 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4846 	ql_tx_ring_clean(qdev);
4847 	ql_free_rx_buffers(qdev);
4848 	ql_release_adapter_resources(qdev);
4849 }
4850 
4851 /*
4852  * This callback is called by the PCI subsystem whenever
4853  * a PCI bus error is detected.
4854  */
4855 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4856 					       enum pci_channel_state state)
4857 {
4858 	struct net_device *ndev = pci_get_drvdata(pdev);
4859 	struct ql_adapter *qdev = netdev_priv(ndev);
4860 
4861 	switch (state) {
4862 	case pci_channel_io_normal:
4863 		return PCI_ERS_RESULT_CAN_RECOVER;
4864 	case pci_channel_io_frozen:
4865 		netif_device_detach(ndev);
4866 		if (netif_running(ndev))
4867 			ql_eeh_close(ndev);
4868 		pci_disable_device(pdev);
4869 		return PCI_ERS_RESULT_NEED_RESET;
4870 	case pci_channel_io_perm_failure:
4871 		dev_err(&pdev->dev,
4872 			"%s: pci_channel_io_perm_failure.\n", __func__);
4873 		ql_eeh_close(ndev);
4874 		set_bit(QL_EEH_FATAL, &qdev->flags);
4875 		return PCI_ERS_RESULT_DISCONNECT;
4876 	}
4877 
4878 	/* Request a slot reset. */
4879 	return PCI_ERS_RESULT_NEED_RESET;
4880 }
4881 
4882 /*
4883  * This callback is called after the PCI bus has been reset.
4884  * Basically, this tries to restart the card from scratch.
4885  * This is a shortened version of the device probe/discovery code,
4886  * it resembles the first half of the device probe routine.
4887  */
4888 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4889 {
4890 	struct net_device *ndev = pci_get_drvdata(pdev);
4891 	struct ql_adapter *qdev = netdev_priv(ndev);
4892 
4893 	pdev->error_state = pci_channel_io_normal;
4894 
4895 	pci_restore_state(pdev);
4896 	if (pci_enable_device(pdev)) {
4897 		netif_err(qdev, ifup, qdev->ndev,
4898 			  "Cannot re-enable PCI device after reset.\n");
4899 		return PCI_ERS_RESULT_DISCONNECT;
4900 	}
4901 	pci_set_master(pdev);
4902 
4903 	if (ql_adapter_reset(qdev)) {
4904 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4905 		set_bit(QL_EEH_FATAL, &qdev->flags);
4906 		return PCI_ERS_RESULT_DISCONNECT;
4907 	}
4908 
4909 	return PCI_ERS_RESULT_RECOVERED;
4910 }
4911 
4912 static void qlge_io_resume(struct pci_dev *pdev)
4913 {
4914 	struct net_device *ndev = pci_get_drvdata(pdev);
4915 	struct ql_adapter *qdev = netdev_priv(ndev);
4916 	int err = 0;
4917 
4918 	if (netif_running(ndev)) {
4919 		err = qlge_open(ndev);
4920 		if (err) {
4921 			netif_err(qdev, ifup, qdev->ndev,
4922 				  "Device initialization failed after reset.\n");
4923 			return;
4924 		}
4925 	} else {
4926 		netif_err(qdev, ifup, qdev->ndev,
4927 			  "Device was not running prior to EEH.\n");
4928 	}
4929 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4930 	netif_device_attach(ndev);
4931 }
4932 
4933 static const struct pci_error_handlers qlge_err_handler = {
4934 	.error_detected = qlge_io_error_detected,
4935 	.slot_reset = qlge_io_slot_reset,
4936 	.resume = qlge_io_resume,
4937 };
4938 
4939 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4940 {
4941 	struct net_device *ndev = pci_get_drvdata(pdev);
4942 	struct ql_adapter *qdev = netdev_priv(ndev);
4943 	int err;
4944 
4945 	netif_device_detach(ndev);
4946 	del_timer_sync(&qdev->timer);
4947 
4948 	if (netif_running(ndev)) {
4949 		err = ql_adapter_down(qdev);
4950 		if (err)
4951 			return err;
4952 	}
4953 
4954 	ql_wol(qdev);
4955 	err = pci_save_state(pdev);
4956 	if (err)
4957 		return err;
4958 
4959 	pci_disable_device(pdev);
4960 
4961 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
4962 
4963 	return 0;
4964 }
4965 
4966 #ifdef CONFIG_PM
4967 static int qlge_resume(struct pci_dev *pdev)
4968 {
4969 	struct net_device *ndev = pci_get_drvdata(pdev);
4970 	struct ql_adapter *qdev = netdev_priv(ndev);
4971 	int err;
4972 
4973 	pci_set_power_state(pdev, PCI_D0);
4974 	pci_restore_state(pdev);
4975 	err = pci_enable_device(pdev);
4976 	if (err) {
4977 		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4978 		return err;
4979 	}
4980 	pci_set_master(pdev);
4981 
4982 	pci_enable_wake(pdev, PCI_D3hot, 0);
4983 	pci_enable_wake(pdev, PCI_D3cold, 0);
4984 
4985 	if (netif_running(ndev)) {
4986 		err = ql_adapter_up(qdev);
4987 		if (err)
4988 			return err;
4989 	}
4990 
4991 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4992 	netif_device_attach(ndev);
4993 
4994 	return 0;
4995 }
4996 #endif /* CONFIG_PM */
4997 
4998 static void qlge_shutdown(struct pci_dev *pdev)
4999 {
5000 	qlge_suspend(pdev, PMSG_SUSPEND);
5001 }
5002 
5003 static struct pci_driver qlge_driver = {
5004 	.name = DRV_NAME,
5005 	.id_table = qlge_pci_tbl,
5006 	.probe = qlge_probe,
5007 	.remove = qlge_remove,
5008 #ifdef CONFIG_PM
5009 	.suspend = qlge_suspend,
5010 	.resume = qlge_resume,
5011 #endif
5012 	.shutdown = qlge_shutdown,
5013 	.err_handler = &qlge_err_handler
5014 };
5015 
5016 module_pci_driver(qlge_driver);
5017