This source file includes following definitions.
- lan78xx_read_reg
- lan78xx_write_reg
- lan78xx_read_stats
- lan78xx_check_stat_rollover
- lan78xx_update_stats
- lan78xx_phy_wait_not_busy
- mii_access
- lan78xx_wait_eeprom
- lan78xx_eeprom_confirm_not_busy
- lan78xx_read_raw_eeprom
- lan78xx_read_eeprom
- lan78xx_write_raw_eeprom
- lan78xx_read_raw_otp
- lan78xx_write_raw_otp
- lan78xx_read_otp
- lan78xx_dataport_wait_not_busy
- lan78xx_dataport_write
- lan78xx_set_addr_filter
- lan78xx_hash
- lan78xx_deferred_multicast_write
- lan78xx_set_multicast
- lan78xx_update_flowcontrol
- lan78xx_link_reset
- lan78xx_defer_kevent
- lan78xx_status
- lan78xx_ethtool_get_eeprom_len
- lan78xx_ethtool_get_eeprom
- lan78xx_ethtool_set_eeprom
- lan78xx_get_strings
- lan78xx_get_sset_count
- lan78xx_get_stats
- lan78xx_get_wol
- lan78xx_set_wol
- lan78xx_get_eee
- lan78xx_set_eee
- lan78xx_get_link
- lan78xx_get_drvinfo
- lan78xx_get_msglevel
- lan78xx_set_msglevel
- lan78xx_get_link_ksettings
- lan78xx_set_link_ksettings
- lan78xx_get_pause
- lan78xx_set_pause
- lan78xx_get_regs_len
- lan78xx_get_regs
- lan78xx_ioctl
- lan78xx_init_mac_address
- lan78xx_mdiobus_read
- lan78xx_mdiobus_write
- lan78xx_mdio_init
- lan78xx_remove_mdio
- lan78xx_link_status_change
- irq_map
- irq_unmap
- lan78xx_irq_mask
- lan78xx_irq_unmask
- lan78xx_irq_bus_lock
- lan78xx_irq_bus_sync_unlock
- lan78xx_setup_irq_domain
- lan78xx_remove_irq_domain
- lan8835_fixup
- ksz9031rnx_fixup
- lan7801_phy_init
- lan78xx_phy_init
- lan78xx_set_rx_max_frame_length
- unlink_urbs
- lan78xx_change_mtu
- lan78xx_set_mac_addr
- lan78xx_set_features
- lan78xx_deferred_vlan_write
- lan78xx_vlan_rx_add_vid
- lan78xx_vlan_rx_kill_vid
- lan78xx_init_ltm
- lan78xx_reset
- lan78xx_init_stats
- lan78xx_open
- lan78xx_terminate_urbs
- lan78xx_stop
- lan78xx_tx_prep
- defer_bh
- tx_complete
- lan78xx_queue_skb
- lan78xx_start_xmit
- lan78xx_get_endpoints
- lan78xx_bind
- lan78xx_unbind
- lan78xx_rx_csum_offload
- lan78xx_rx_vlan_offload
- lan78xx_skb_return
- lan78xx_rx
- rx_process
- rx_submit
- rx_complete
- lan78xx_tx_bh
- lan78xx_rx_bh
- lan78xx_bh
- lan78xx_delayedwork
- intr_complete
- lan78xx_disconnect
- lan78xx_tx_timeout
- lan78xx_features_check
- lan78xx_stat_monitor
- lan78xx_probe
- lan78xx_wakeframe_crc16
- lan78xx_set_suspend
- lan78xx_suspend
- lan78xx_resume
- lan78xx_reset_resume
1
2
3
4
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include "lan78xx.h"
33
34 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME "lan78xx"
37
38 #define TX_TIMEOUT_JIFFIES (5 * HZ)
39 #define THROTTLE_JIFFIES (HZ / 8)
40 #define UNLINK_TIMEOUT_MS 3
41
42 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
43
44 #define SS_USB_PKT_SIZE (1024)
45 #define HS_USB_PKT_SIZE (512)
46 #define FS_USB_PKT_SIZE (64)
47
48 #define MAX_RX_FIFO_SIZE (12 * 1024)
49 #define MAX_TX_FIFO_SIZE (12 * 1024)
50 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
51 #define DEFAULT_BULK_IN_DELAY (0x0800)
52 #define MAX_SINGLE_PACKET_SIZE (9000)
53 #define DEFAULT_TX_CSUM_ENABLE (true)
54 #define DEFAULT_RX_CSUM_ENABLE (true)
55 #define DEFAULT_TSO_CSUM_ENABLE (true)
56 #define DEFAULT_VLAN_FILTER_ENABLE (true)
57 #define DEFAULT_VLAN_RX_OFFLOAD (true)
58 #define TX_OVERHEAD (8)
59 #define RXW_PADDING 2
60
61 #define LAN78XX_USB_VENDOR_ID (0x0424)
62 #define LAN7800_USB_PRODUCT_ID (0x7800)
63 #define LAN7850_USB_PRODUCT_ID (0x7850)
64 #define LAN7801_USB_PRODUCT_ID (0x7801)
65 #define LAN78XX_EEPROM_MAGIC (0x78A5)
66 #define LAN78XX_OTP_MAGIC (0x78F3)
67
68 #define MII_READ 1
69 #define MII_WRITE 0
70
71 #define EEPROM_INDICATOR (0xA5)
72 #define EEPROM_MAC_OFFSET (0x01)
73 #define MAX_EEPROM_SIZE 512
74 #define OTP_INDICATOR_1 (0xF3)
75 #define OTP_INDICATOR_2 (0xF7)
76
77 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
78 WAKE_MCAST | WAKE_BCAST | \
79 WAKE_ARP | WAKE_MAGIC)
80
81
82 #define BULK_IN_PIPE 1
83 #define BULK_OUT_PIPE 2
84
85
86 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
87
88
89 #define STAT_UPDATE_TIMER (1 * 1000)
90
91
92 #define MAX_INT_EP (32)
93 #define INT_EP_INTEP (31)
94 #define INT_EP_OTP_WR_DONE (28)
95 #define INT_EP_EEE_TX_LPI_START (26)
96 #define INT_EP_EEE_TX_LPI_STOP (25)
97 #define INT_EP_EEE_RX_LPI (24)
98 #define INT_EP_MAC_RESET_TIMEOUT (23)
99 #define INT_EP_RDFO (22)
100 #define INT_EP_TXE (21)
101 #define INT_EP_USB_STATUS (20)
102 #define INT_EP_TX_DIS (19)
103 #define INT_EP_RX_DIS (18)
104 #define INT_EP_PHY (17)
105 #define INT_EP_DP (16)
106 #define INT_EP_MAC_ERR (15)
107 #define INT_EP_TDFU (14)
108 #define INT_EP_TDFO (13)
109 #define INT_EP_UTX (12)
110 #define INT_EP_GPIO_11 (11)
111 #define INT_EP_GPIO_10 (10)
112 #define INT_EP_GPIO_9 (9)
113 #define INT_EP_GPIO_8 (8)
114 #define INT_EP_GPIO_7 (7)
115 #define INT_EP_GPIO_6 (6)
116 #define INT_EP_GPIO_5 (5)
117 #define INT_EP_GPIO_4 (4)
118 #define INT_EP_GPIO_3 (3)
119 #define INT_EP_GPIO_2 (2)
120 #define INT_EP_GPIO_1 (1)
121 #define INT_EP_GPIO_0 (0)
122
/* ethtool statistics names.
 * Entry order MUST match the field order of struct lan78xx_statstage /
 * struct lan78xx_statstage64 — lan78xx_get_stats copies the counters
 * positionally.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
172
/* Raw 32-bit hardware statistics block, read in one shot from the device
 * by lan78xx_read_stats (device wire format, converted to host order).
 * Field order MUST match lan78xx_gstrings and struct lan78xx_statstage64;
 * the counters are processed positionally as a u32 array.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
222
/* 64-bit accumulated statistics: the 32-bit hardware counters extended
 * with rollover compensation by lan78xx_update_stats. Field order MUST
 * mirror struct lan78xx_statstage exactly (positional u64-array access).
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
272
/* Registers dumped by the ethtool get_regs operation
 * (lan78xx_get_regs / lan78xx_get_regs_len).
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
294
295 #define PHY_REG_SIZE (32 * sizeof(u32))
296
297 struct lan78xx_net;
298
/* Per-device private state stored in lan78xx_net::data[0]. */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;				/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN];	/* multicast hash table shadow */
	u32 pfilter_table[NUM_OF_MAF][2];	/* perfect-filter shadow: [0]=MAF_HI, [1]=MAF_LO */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];	/* VLAN filter table shadow */
	struct mutex dataport_mutex;		/* serializes dataport (DP_*) accesses */
	spinlock_t rfe_ctl_lock;		/* protects the shadow tables above */
	struct work_struct set_multicast;	/* deferred write of multicast filters */
	struct work_struct set_vlan;		/* deferred write of VLAN filters */
	u32 wol;				/* Wake-on-LAN option flags */
};
311
/* Lifecycle states of an skb queued on the rxq/txq/done lists
 * (stored in struct skb_data in the skb control block).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
321
/* Per-skb bookkeeping kept in the skb control block (skb->cb). */
struct skb_data {
	struct urb *urb;		/* URB carrying this skb's transfer */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;			/* payload length for accounting */
	int num_of_packet;		/* packets aggregated in this transfer */
};
329
/* Context passed with asynchronous control requests. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
334
335 #define EVENT_TX_HALT 0
336 #define EVENT_RX_HALT 1
337 #define EVENT_RX_MEMORY 2
338 #define EVENT_STS_SPLIT 3
339 #define EVENT_LINK_RESET 4
340 #define EVENT_RX_PAUSED 5
341 #define EVENT_DEV_WAKING 6
342 #define EVENT_DEV_ASLEEP 7
343 #define EVENT_DEV_OPEN 8
344 #define EVENT_STAT_UPDATE 9
345
/* Statistics bookkeeping: last raw snapshot, per-counter rollover counts
 * and maxima, plus the accumulated 64-bit totals. access_lock serializes
 * readers/updaters of all four members.
 */
struct statstage {
	struct mutex access_lock;
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};
353
/* IRQ domain state used to expose the device's interrupt-endpoint bits
 * (e.g. the PHY interrupt) as Linux IRQs.
 */
struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;			/* mapped IRQ number for the PHY */
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;				/* shadow of enabled interrupt bits */
	struct mutex irq_lock;			/* bus lock for irqchip callbacks */
};
362
/* Main per-adapter state. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;			/* points to struct lan78xx_priv */

	int rx_qlen;				/* number of in-flight RX URBs */
	int tx_qlen;				/* number of in-flight TX URBs */
	struct sk_buff_head rxq;		/* RX skbs with submitted URBs */
	struct sk_buff_head txq;		/* TX skbs with submitted URBs */
	struct sk_buff_head done;		/* completed skbs awaiting bh */
	struct sk_buff_head rxq_pause;		/* RX skbs held while paused */
	struct sk_buff_head txq_pend;		/* TX skbs waiting for a URB */

	struct tasklet_struct bh;		/* RX/TX bottom half */
	struct delayed_work wq;			/* deferred kevent work */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;				/* netif message level bitmap */

	struct urb *urb_intr;			/* interrupt endpoint URB */
	struct usb_anchor deferred;		/* URBs deferred during suspend */

	struct mutex phy_mutex;			/* serializes MDIO/EEPROM/OTP access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;				/* MTU including hardware headers */
	size_t rx_urb_size;			/* byte count of each RX URB */

	unsigned long flags;			/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;		/* throttle/resubmit timer */
	struct timer_list stat_monitor;		/* periodic statistics refresh */

	unsigned long data[5];			/* data[0] holds lan78xx_priv * */

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;				/* ID_REV chip id (7800/7801/7850) */
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;				/* flow control from autoneg result */
	u8 fc_request_control;			/* manual flow control request */

	int delta;				/* stat_monitor interval factor */
	struct statstage stats;

	struct irq_domain_data domain_data;
};
422
423
424 #define PHY_LAN8835 (0x0007C130)
425 #define PHY_KSZ9031RNX (0x00221620)
426
427
428 static int msg_level = -1;
429 module_param(msg_level, int, 0);
430 MODULE_PARM_DESC(msg_level, "Override default message level");
431
432 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
433 {
434 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
435 int ret;
436
437 if (!buf)
438 return -ENOMEM;
439
440 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
441 USB_VENDOR_REQUEST_READ_REGISTER,
442 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
443 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
444 if (likely(ret >= 0)) {
445 le32_to_cpus(buf);
446 *data = *buf;
447 } else {
448 netdev_warn(dev->net,
449 "Failed to read register index 0x%08x. ret = %d",
450 index, ret);
451 }
452
453 kfree(buf);
454
455 return ret;
456 }
457
458 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
459 {
460 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
461 int ret;
462
463 if (!buf)
464 return -ENOMEM;
465
466 *buf = data;
467 cpu_to_le32s(buf);
468
469 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
470 USB_VENDOR_REQUEST_WRITE_REGISTER,
471 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
472 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
473 if (unlikely(ret < 0)) {
474 netdev_warn(dev->net,
475 "Failed to write register index 0x%08x. ret = %d",
476 index, ret);
477 }
478
479 kfree(buf);
480
481 return ret;
482 }
483
484 static int lan78xx_read_stats(struct lan78xx_net *dev,
485 struct lan78xx_statstage *data)
486 {
487 int ret = 0;
488 int i;
489 struct lan78xx_statstage *stats;
490 u32 *src;
491 u32 *dst;
492
493 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
494 if (!stats)
495 return -ENOMEM;
496
497 ret = usb_control_msg(dev->udev,
498 usb_rcvctrlpipe(dev->udev, 0),
499 USB_VENDOR_REQUEST_GET_STATS,
500 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
501 0,
502 0,
503 (void *)stats,
504 sizeof(*stats),
505 USB_CTRL_SET_TIMEOUT);
506 if (likely(ret >= 0)) {
507 src = (u32 *)stats;
508 dst = (u32 *)data;
509 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
510 le32_to_cpus(&src[i]);
511 dst[i] = src[i];
512 }
513 } else {
514 netdev_warn(dev->net,
515 "Failed to read stat ret = %d", ret);
516 }
517
518 kfree(stats);
519
520 return ret;
521 }
522
/* Detect 32-bit counter wraparound: if the freshly read value is below
 * the previously saved snapshot, the hardware counter rolled over, so
 * bump the per-counter rollover count. Wrapped in do { } while (0) so
 * the macro expands safely inside unbraced if/else bodies; arguments
 * parenthesized against operator-precedence surprises.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
527
/* Compare a fresh hardware statistics snapshot against the last saved
 * one, bumping the rollover count for every counter that wrapped, then
 * save the new snapshot as the reference for the next comparison.
 * Caller holds dev->stats.access_lock (see lan78xx_update_stats).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* new snapshot becomes the baseline for the next rollover check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
581
/* Refresh the accumulated 64-bit statistics: read the raw 32-bit
 * counters, record rollovers, then recompute each 64-bit total as
 * raw + rollover_count * (rollover_max + 1).
 *
 * The four statistics structs are walked positionally as u32/u64
 * arrays, which relies on lan78xx_statstage and lan78xx_statstage64
 * declaring their fields in the same order. Best-effort: silently
 * returns if the interface cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* > 0: usb_control_msg transferred at least one byte */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
609
610
611 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
612 {
613 unsigned long start_time = jiffies;
614 u32 val;
615 int ret;
616
617 do {
618 ret = lan78xx_read_reg(dev, MII_ACC, &val);
619 if (unlikely(ret < 0))
620 return -EIO;
621
622 if (!(val & MII_ACC_MII_BUSY_))
623 return 0;
624 } while (!time_after(jiffies, start_time + HZ));
625
626 return -EIO;
627 }
628
629 static inline u32 mii_access(int id, int index, int read)
630 {
631 u32 ret;
632
633 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
634 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
635 if (read)
636 ret |= MII_ACC_MII_READ_;
637 else
638 ret |= MII_ACC_MII_WRITE_;
639 ret |= MII_ACC_MII_BUSY_;
640
641 return ret;
642 }
643
644 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
645 {
646 unsigned long start_time = jiffies;
647 u32 val;
648 int ret;
649
650 do {
651 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
652 if (unlikely(ret < 0))
653 return -EIO;
654
655 if (!(val & E2P_CMD_EPC_BUSY_) ||
656 (val & E2P_CMD_EPC_TIMEOUT_))
657 break;
658 usleep_range(40, 100);
659 } while (!time_after(jiffies, start_time + HZ));
660
661 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
662 netdev_warn(dev->net, "EEPROM read operation timeout");
663 return -EIO;
664 }
665
666 return 0;
667 }
668
669 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
670 {
671 unsigned long start_time = jiffies;
672 u32 val;
673 int ret;
674
675 do {
676 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
677 if (unlikely(ret < 0))
678 return -EIO;
679
680 if (!(val & E2P_CMD_EPC_BUSY_))
681 return 0;
682
683 usleep_range(40, 100);
684 } while (!time_after(jiffies, start_time + HZ));
685
686 netdev_warn(dev->net, "EEPROM is busy");
687 return -EIO;
688 }
689
690 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
691 u32 length, u8 *data)
692 {
693 u32 val;
694 u32 saved;
695 int i, ret;
696 int retval;
697
698
699
700
701 ret = lan78xx_read_reg(dev, HW_CFG, &val);
702 saved = val;
703 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
704 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
705 ret = lan78xx_write_reg(dev, HW_CFG, val);
706 }
707
708 retval = lan78xx_eeprom_confirm_not_busy(dev);
709 if (retval)
710 return retval;
711
712 for (i = 0; i < length; i++) {
713 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
714 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
715 ret = lan78xx_write_reg(dev, E2P_CMD, val);
716 if (unlikely(ret < 0)) {
717 retval = -EIO;
718 goto exit;
719 }
720
721 retval = lan78xx_wait_eeprom(dev);
722 if (retval < 0)
723 goto exit;
724
725 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
726 if (unlikely(ret < 0)) {
727 retval = -EIO;
728 goto exit;
729 }
730
731 data[i] = val & 0xFF;
732 offset++;
733 }
734
735 retval = 0;
736 exit:
737 if (dev->chipid == ID_REV_CHIP_ID_7800_)
738 ret = lan78xx_write_reg(dev, HW_CFG, saved);
739
740 return retval;
741 }
742
743 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
744 u32 length, u8 *data)
745 {
746 u8 sig;
747 int ret;
748
749 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
750 if ((ret == 0) && (sig == EEPROM_INDICATOR))
751 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
752 else
753 ret = -EINVAL;
754
755 return ret;
756 }
757
/* Write @length bytes to the EEPROM starting at @offset.
 *
 * Sequence: disable LAN7800 LEDs (HW_CFG is saved and restored at exit —
 * presumably the pins are muxed with the EEPROM interface; confirm
 * against the datasheet), issue a write-enable (EWEN) command, then for
 * each byte load E2P_DATA and issue a WRITE command, waiting for
 * completion after every command. Returns 0 on success or a negative
 * errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		/* LEDs off while the EEPROM is being accessed */
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* erase/write enable must precede the write commands */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* stage the byte before issuing the write command */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the original LED configuration on LAN7800 */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
824
/* Read @length bytes from raw OTP memory starting at @offset.
 *
 * Powers the OTP block up if it is in power-down (waiting up to ~1 s for
 * the transition), then for each byte programs the address registers,
 * issues a READ command and polls OTP_STATUS until the command
 * completes. Returns 0 on success or -EIO on a poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* OTP block is powered down; wake it and wait */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address split across two registers: high and low bits */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
878
/* Program @length bytes into raw OTP memory starting at @offset.
 *
 * Powers the OTP block up if needed, selects byte-program mode, then
 * for each byte sets the address and data registers, triggers the
 * program/verify command via OTP_TST_CMD and polls OTP_STATUS for
 * completion. OTP is one-time programmable — bits can only be set, not
 * cleared. Returns 0 on success or -EIO on a poll timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* OTP block is powered down; wake it and wait */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* address split across two registers: high and low bits */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
931
932 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
933 u32 length, u8 *data)
934 {
935 u8 sig;
936 int ret;
937
938 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
939
940 if (ret == 0) {
941 if (sig == OTP_INDICATOR_2)
942 offset += 0x100;
943 else if (sig != OTP_INDICATOR_1)
944 ret = -EINVAL;
945 if (!ret)
946 ret = lan78xx_read_raw_otp(dev, offset, length, data);
947 }
948
949 return ret;
950 }
951
952 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
953 {
954 int i, ret;
955
956 for (i = 0; i < 100; i++) {
957 u32 dp_sel;
958
959 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
960 if (unlikely(ret < 0))
961 return -EIO;
962
963 if (dp_sel & DP_SEL_DPRDY_)
964 return 0;
965
966 usleep_range(40, 100);
967 }
968
969 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
970
971 return -EIO;
972 }
973
/* Write @length words from @buf into internal device RAM selected by
 * @ram_select (e.g. the VLAN/multicast hash tables) via the dataport,
 * one DP_ADDR/DP_DATA/DP_CMD cycle per word.
 *
 * Serialized by pdata->dataport_mutex. NOTE(review): returns 0
 * (success) when the interface cannot be resumed — callers currently
 * ignore the return value, but -EIO would be more accurate.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	/* select the target RAM, preserving the other DP_SEL bits */
	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* each word write must complete before the next begins */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1014
1015 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1016 int index, u8 addr[ETH_ALEN])
1017 {
1018 u32 temp;
1019
1020 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1021 temp = addr[3];
1022 temp = addr[2] | (temp << 8);
1023 temp = addr[1] | (temp << 8);
1024 temp = addr[0] | (temp << 8);
1025 pdata->pfilter_table[index][1] = temp;
1026 temp = addr[5];
1027 temp = addr[4] | (temp << 8);
1028 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1029 pdata->pfilter_table[index][0] = temp;
1030 }
1031 }
1032
1033
/* Hash an Ethernet address into a 9-bit bucket index (0-511) for the
 * 512-bit multicast hash table: the top 9 bits of the CRC-32.
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
1038
/* Work handler: push the shadow multicast state (hash table, perfect
 * filters, RFE_CTL) to the hardware. Runs in process context because
 * lan78xx_set_multicast is called in atomic context and the register
 * writes require blocking USB transfers.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
		container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* filter 0 is the device's own address; clear MAF_HI first so the
	 * entry is invalid while its halves are being updated
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1063
1064 static void lan78xx_set_multicast(struct net_device *netdev)
1065 {
1066 struct lan78xx_net *dev = netdev_priv(netdev);
1067 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1068 unsigned long flags;
1069 int i;
1070
1071 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1072
1073 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1074 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1075
1076 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1077 pdata->mchash_table[i] = 0;
1078
1079 for (i = 1; i < NUM_OF_MAF; i++) {
1080 pdata->pfilter_table[i][0] =
1081 pdata->pfilter_table[i][1] = 0;
1082 }
1083
1084 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1085
1086 if (dev->net->flags & IFF_PROMISC) {
1087 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1088 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1089 } else {
1090 if (dev->net->flags & IFF_ALLMULTI) {
1091 netif_dbg(dev, drv, dev->net,
1092 "receive all multicast enabled");
1093 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1094 }
1095 }
1096
1097 if (netdev_mc_count(dev->net)) {
1098 struct netdev_hw_addr *ha;
1099 int i;
1100
1101 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1102
1103 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1104
1105 i = 1;
1106 netdev_for_each_mc_addr(ha, netdev) {
1107
1108 if (i < 33) {
1109 lan78xx_set_addr_filter(pdata, i, ha->addr);
1110 } else {
1111 u32 bitnum = lan78xx_hash(ha->addr);
1112
1113 pdata->mchash_table[bitnum / 32] |=
1114 (1 << (bitnum % 32));
1115 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1116 }
1117 i++;
1118 }
1119 }
1120
1121 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1122
1123
1124 schedule_work(&pdata->set_multicast);
1125 }
1126
/* Program MAC (FLOW) and FIFO (FCT_FLOW) flow-control registers for the
 * resolved pause configuration.  With fc_autoneg the capability is resolved
 * from local/partner advertisements; otherwise the user's request is used.
 * Always returns 0; register write errors are ignored.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* low 16 bits: pause time, presumably maximum - TODO confirm */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds depend on the USB link speed; magic values -
	 * TODO confirm against the datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1161
/* Handle a PHY interrupt: acknowledge it, re-read link state and, on a
 * link transition, reset the MAC (link down) or reconfigure USB LPM and
 * flow control (link up).
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear the PHY interrupt status bit */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* link went down: reset the MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no point polling statistics while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* tune USB LPM (U1/U2) initiation for the link speed */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* at gigabit, disable U2 and enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);

				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* below gigabit, both U1 and U2 may be on */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart statistics polling now that the link is up */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1239
1240
1241
1242
1243
1244
1245 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1246 {
1247 set_bit(work, &dev->flags);
1248 if (!schedule_delayed_work(&dev->wq, 0))
1249 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1250 }
1251
/* Interrupt-endpoint URB completion: parse the 4-byte (little-endian)
 * interrupt status word.  A PHY interrupt defers link handling to the
 * workqueue and forwards the event into the driver's irq_domain so the
 * phylib interrupt handler runs.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			/* generic_handle_irq() expects irqs disabled */
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}
1277
/* ethtool_ops::get_eeprom_len: size of the EEPROM exposed to ethtool */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1282
/* ethtool_ops::get_eeprom: read raw EEPROM contents into @data, holding a
 * runtime-PM reference for the duration of the USB register traffic.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1301
1302 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1303 struct ethtool_eeprom *ee, u8 *data)
1304 {
1305 struct lan78xx_net *dev = netdev_priv(netdev);
1306 int ret;
1307
1308 ret = usb_autopm_get_interface(dev->intf);
1309 if (ret)
1310 return ret;
1311
1312
1313
1314
1315 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1316 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1317 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1318 (ee->offset == 0) &&
1319 (ee->len == 512) &&
1320 (data[0] == OTP_INDICATOR_1))
1321 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1322
1323 usb_autopm_put_interface(dev->intf);
1324
1325 return ret;
1326 }
1327
1328 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1329 u8 *data)
1330 {
1331 if (stringset == ETH_SS_STATS)
1332 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1333 }
1334
1335 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1336 {
1337 if (sset == ETH_SS_STATS)
1338 return ARRAY_SIZE(lan78xx_gstrings);
1339 else
1340 return -EOPNOTSUPP;
1341 }
1342
/* ethtool_ops::get_ethtool_stats: refresh the hardware counters, then copy
 * the cached 64-bit statistics out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1354
/* ethtool_ops::get_wol: report Wake-on-LAN capability/options.  WoL is
 * only claimed when USB remote wakeup is enabled in USB_CFG0; the active
 * options come from the cached pdata->wol.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		/* cannot read the register: report no WoL support */
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1382
1383 static int lan78xx_set_wol(struct net_device *netdev,
1384 struct ethtool_wolinfo *wol)
1385 {
1386 struct lan78xx_net *dev = netdev_priv(netdev);
1387 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1388 int ret;
1389
1390 ret = usb_autopm_get_interface(dev->intf);
1391 if (ret < 0)
1392 return ret;
1393
1394 if (wol->wolopts & ~WAKE_ALL)
1395 return -EINVAL;
1396
1397 pdata->wol = wol->wolopts;
1398
1399 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1400
1401 phy_ethtool_set_wol(netdev->phydev, wol);
1402
1403 usb_autopm_put_interface(dev->intf);
1404
1405 return ret;
1406 }
1407
/* ethtool_ops::get_eee: combine the PHY's EEE state with the MAC's EEE
 * enable bit; eee_active is derived from the overlap of local and partner
 * advertisements.  Register read errors after the PHY query are ignored.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;

		/* EEE_TX_LPI_REQ_DLY holds the LPI request delay */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1445
/* ethtool_ops::set_eee: toggle the MAC's EEE enable bit and, when enabling,
 * forward the advertisement to the PHY and program the TX LPI request delay.
 * NOTE(review): register read/write errors are discarded; always returns 0.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1475
1476 static u32 lan78xx_get_link(struct net_device *net)
1477 {
1478 phy_read_status(net->phydev);
1479
1480 return net->phydev->link;
1481 }
1482
1483 static void lan78xx_get_drvinfo(struct net_device *net,
1484 struct ethtool_drvinfo *info)
1485 {
1486 struct lan78xx_net *dev = netdev_priv(net);
1487
1488 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1489 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1490 }
1491
1492 static u32 lan78xx_get_msglevel(struct net_device *net)
1493 {
1494 struct lan78xx_net *dev = netdev_priv(net);
1495
1496 return dev->msg_enable;
1497 }
1498
1499 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1500 {
1501 struct lan78xx_net *dev = netdev_priv(net);
1502
1503 dev->msg_enable = level;
1504 }
1505
/* ethtool_ops::get_link_ksettings: fetch link settings from phylib while
 * holding a runtime-PM reference.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1523
/* ethtool_ops::set_link_ksettings: apply settings via phylib.  For forced
 * (non-autoneg) modes the PHY is briefly put into loopback and back out,
 * presumably to make it re-latch the manual speed/duplex - TODO confirm.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* bounce the link through loopback mode */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1551
/* ethtool_ops::get_pauseparam: report the requested pause configuration.
 * NOTE(review): ecmd is fetched but never used here - candidate for removal.
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
1569
/* ethtool_ops::set_pauseparam: record the requested pause configuration
 * and, if link autoneg is on, re-advertise the matching Pause/Asym_Pause
 * bits through phylib.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg requires link autoneg to be enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* rebuild the pause advertisement from the new request */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1614
1615 static int lan78xx_get_regs_len(struct net_device *netdev)
1616 {
1617 if (!netdev->phydev)
1618 return (sizeof(lan78xx_regs));
1619 else
1620 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1621 }
1622
/* ethtool_ops::get_regs: dump the MAC registers listed in lan78xx_regs[],
 * followed by the 32 standard PHY registers when a PHY is attached.
 * NOTE(review): unlike the other ethtool ops this takes no usb_autopm
 * reference - confirm whether one is needed here.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers; i keeps advancing so they land after the
	 * MAC block
	 */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1642
/* ethtool operations table; link management is delegated to phylib */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link = lan78xx_get_link,
	.nway_reset = phy_ethtool_nway_reset,
	.get_drvinfo = lan78xx_get_drvinfo,
	.get_msglevel = lan78xx_get_msglevel,
	.set_msglevel = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom = lan78xx_ethtool_get_eeprom,
	.set_eeprom = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings = lan78xx_get_strings,
	.get_wol = lan78xx_get_wol,
	.set_wol = lan78xx_set_wol,
	.get_eee = lan78xx_get_eee,
	.set_eee = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len = lan78xx_get_regs_len,
	.get_regs = lan78xx_get_regs,
};
1666
1667 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1668 {
1669 if (!netif_running(netdev))
1670 return -EINVAL;
1671
1672 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1673 }
1674
/* Establish the MAC address at init time.  Preference order:
 *   1. address already latched in RX_ADDRL/RX_ADDRH (e.g. EEPROM autoload)
 *   2. platform / device-tree supplied address
 *   3. address read from EEPROM or OTP
 *   4. a random locally-administered address
 * The chosen address is mirrored into perfect-filter slot 0.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address supplied by platform/DT */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* program perfect filter slot 0 with our own address */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1724
1725
/* mii_bus read op: perform an MII read through the MII_ACC/MII_DATA
 * registers, serialized by phy_mutex and wrapped in a runtime-PM reference.
 * Returns the 16-bit register value, or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1761
/* mii_bus write op: load MII_DATA and trigger the write via MII_ACC,
 * serialized by phy_mutex and wrapped in a runtime-PM reference.
 * NOTE(review): busy-wait / write errors are discarded - the function
 * always returns 0 once the autopm reference was obtained.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
1796
/* Allocate and register the MDIO bus backing the chip's internal MII
 * master.  The phy_mask restricts scanning: LAN7800/7850 have an internal
 * PHY at address 1; LAN7801 uses an external PHY somewhere in 0-7.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id derived from the USB topology so it is unique per device */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1843
/* Tear down the MDIO bus: unregister first, then free */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1849
/* phylib link-change callback.  Workaround applied only for forced
 * 100 Mbit/s operation: with the PHY interrupt pin masked, the BMCR speed
 * bits are cleared and then set back to 100, after which the interrupt
 * status is read to discard any event generated by the toggle -
 * presumably working around a PHY re-latch issue; TODO confirm erratum.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* toggle the speed bits: drop out of 100/1000, then force
		 * 100 again
		 */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1881
/* irq_domain map callback: attach the driver's irqchip and flow handler to
 * a freshly created virq.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1893
/* irq_domain unmap callback: detach chip, handler and chip data */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1899
/* irq_domain callbacks for the interrupt-endpoint IRQ domain */
static const struct irq_domain_ops chip_domain_ops = {
	.map = irq_map,
	.unmap = irq_unmap,
};
1904
1905 static void lan78xx_irq_mask(struct irq_data *irqd)
1906 {
1907 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1908
1909 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1910 }
1911
1912 static void lan78xx_irq_unmask(struct irq_data *irqd)
1913 {
1914 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1915
1916 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1917 }
1918
1919 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1920 {
1921 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1922
1923 mutex_lock(&data->irq_lock);
1924 }
1925
/* irqchip bus_sync_unlock: flush the cached enable word to INT_EP_CTL
 * (only if it changed) and release the bus lock.  Runs in process context,
 * which is why the register write is deferred to here.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1943
/* irqchip for the interrupt-endpoint status bits; mask state is cached and
 * written back in bulk on bus_sync_unlock
 */
static struct irq_chip lan78xx_irqchip = {
	.name = "lan78xx-irqs",
	.irq_mask = lan78xx_irq_mask,
	.irq_unmask = lan78xx_irq_unmask,
	.irq_bus_lock = lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
};
1951
/* Build an irq_domain over the device's interrupt-endpoint status bits so
 * the PHY gets a real Linux IRQ; only INT_EP_PHY is mapped.  On success
 * domain_data.phyirq holds the virq for the PHY interrupt.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable word from the current register value */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1990
/* Dispose of the PHY IRQ mapping and the interrupt-endpoint irq_domain */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
2002
/* PHY fixup for LAN8835 on LAN7801: select the RGMII interface mode in the
 * PCS MMD (bits [12:11] of reg 0x8010, presumably the mode-select field -
 * TODO confirm) and enable the MAC-side TX clock delay.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED mode: set interface mode field to 01b (RGMII) */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2026
/* PHY fixup for KSZ9031RNX on LAN7801: program the RGMII pad-skew
 * registers (MMD 2 regs 4/5/8 - RX/TX data and clock pad skews,
 * presumably; TODO confirm against the KSZ9031 datasheet).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2043
/* LAN7801-specific PHY setup.  If no PHY is found on the MDIO bus, a fixed
 * 1 Gbit/s full-duplex PHY is registered and the MAC's RGMII delays and
 * reference clocks are configured directly.  Otherwise fixups for the two
 * supported external PHYs (KSZ9031RNX, LAN8835) are registered.
 * Returns the phy_device, or NULL on failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC-side RGMII TX clock delay and DLL tuning */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}

		/* add more external PHY fixup here if needed */
		phydev->is_internal = false;
	}
	return phydev;
}
2098
2099 static int lan78xx_phy_init(struct lan78xx_net *dev)
2100 {
2101 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2102 int ret;
2103 u32 mii_adv;
2104 struct phy_device *phydev;
2105
2106 switch (dev->chipid) {
2107 case ID_REV_CHIP_ID_7801_:
2108 phydev = lan7801_phy_init(dev);
2109 if (!phydev) {
2110 netdev_err(dev->net, "lan7801: PHY Init Failed");
2111 return -EIO;
2112 }
2113 break;
2114
2115 case ID_REV_CHIP_ID_7800_:
2116 case ID_REV_CHIP_ID_7850_:
2117 phydev = phy_find_first(dev->mdiobus);
2118 if (!phydev) {
2119 netdev_err(dev->net, "no PHY found\n");
2120 return -EIO;
2121 }
2122 phydev->is_internal = true;
2123 dev->interface = PHY_INTERFACE_MODE_GMII;
2124 break;
2125
2126 default:
2127 netdev_err(dev->net, "Unknown CHIP ID found\n");
2128 return -EIO;
2129 }
2130
2131
2132 if (dev->domain_data.phyirq > 0)
2133 phydev->irq = dev->domain_data.phyirq;
2134 else
2135 phydev->irq = 0;
2136 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2137
2138
2139 phydev->mdix = ETH_TP_MDI_AUTO;
2140
2141 ret = phy_connect_direct(dev->net, phydev,
2142 lan78xx_link_status_change,
2143 dev->interface);
2144 if (ret) {
2145 netdev_err(dev->net, "can't attach PHY to %s\n",
2146 dev->mdiobus->id);
2147 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2148 if (phy_is_pseudo_fixed_link(phydev)) {
2149 fixed_phy_unregister(phydev);
2150 } else {
2151 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2152 0xfffffff0);
2153 phy_unregister_fixup_for_uid(PHY_LAN8835,
2154 0xfffffff0);
2155 }
2156 }
2157 return -EIO;
2158 }
2159
2160
2161 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2162
2163
2164 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2165 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2166 phydev->advertising);
2167 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2168 phydev->advertising);
2169 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2170 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2171 linkmode_or(phydev->advertising, fc, phydev->advertising);
2172
2173 if (phydev->mdio.dev.of_node) {
2174 u32 reg;
2175 int len;
2176
2177 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2178 "microchip,led-modes",
2179 sizeof(u32));
2180 if (len >= 0) {
2181
2182 lan78xx_read_reg(dev, HW_CFG, ®);
2183 reg &= ~(HW_CFG_LED0_EN_ |
2184 HW_CFG_LED1_EN_ |
2185 HW_CFG_LED2_EN_ |
2186 HW_CFG_LED3_EN_);
2187 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2188 (len > 1) * HW_CFG_LED1_EN_ |
2189 (len > 2) * HW_CFG_LED2_EN_ |
2190 (len > 3) * HW_CFG_LED3_EN_;
2191 lan78xx_write_reg(dev, HW_CFG, reg);
2192 }
2193 }
2194
2195 genphy_config_aneg(phydev);
2196
2197 dev->fc_autoneg = phydev->autoneg;
2198
2199 return 0;
2200 }
2201
/* Program the MAC's maximum RX frame size.  RX is temporarily disabled
 * while the size field is updated, then restored.  The "+ 4" presumably
 * leaves room for the FCS - TODO confirm against the datasheet.
 * Always returns 0; register access errors are ignored.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2230
/* Unlink every in-flight URB on queue @q.  The queue lock must be dropped
 * around usb_unlink_urb() (its completion handler takes the same lock), so
 * each pass rescans the queue for the next entry not yet in unlink_start
 * state.  Returns the number of URBs successfully unlinked.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next skb we have not started unlinking yet */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with the completion
		 * handler, which frees the URB.
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);

		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2275
/* ndo_change_mtu: program the new max RX frame size and grow the RX URB
 * size if needed.  Frame lengths that are an exact multiple of the USB max
 * packet size are rejected (-EDOM) - presumably because such transfers
 * would need a zero-length packet to terminate; TODO confirm.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* URBs in flight are too small now: recycle them */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2305
/* ndo_set_mac_address: program a new station address into RX_ADDRL/H and
 * mirror it into perfect-filter slot 0.  Only allowed while the interface
 * is down.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* Added to support MAC address changes */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	return 0;
}
2337
2338
/* ndo_set_features handler: translate netdev feature flags into the
 * RFE_CTL receive-filter-engine register.
 *
 * The cached copy pdata->rfe_ctl is updated under rfe_ctl_lock (it is
 * shared with the multicast-filter path); the hardware register itself
 * is written after the lock is dropped.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* RX checksum offload covers TCP/UDP, IP, ICMP and IGMP engines */
	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* NOTE(review): write status is discarded; function always
	 * reports success
	 */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2373
/* Work handler: flush the software VLAN table shadow into the chip's
 * VLAN dataport RAM.  Runs in process context so the (sleeping)
 * dataport access is legal; scheduled from the VID add/kill callbacks.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2383
2384 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2385 __be16 proto, u16 vid)
2386 {
2387 struct lan78xx_net *dev = netdev_priv(netdev);
2388 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2389 u16 vid_bit_index;
2390 u16 vid_dword_index;
2391
2392 vid_dword_index = (vid >> 5) & 0x7F;
2393 vid_bit_index = vid & 0x1F;
2394
2395 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2396
2397
2398 schedule_work(&pdata->set_vlan);
2399
2400 return 0;
2401 }
2402
2403 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2404 __be16 proto, u16 vid)
2405 {
2406 struct lan78xx_net *dev = netdev_priv(netdev);
2407 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2408 u16 vid_bit_index;
2409 u16 vid_dword_index;
2410
2411 vid_dword_index = (vid >> 5) & 0x7F;
2412 vid_bit_index = vid & 0x1F;
2413
2414 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2415
2416
2417 schedule_work(&pdata->set_vlan);
2418
2419 return 0;
2420 }
2421
/* Initialize the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1, try to load the six LTM register
 * values from EEPROM (preferred) or OTP: a 2-byte descriptor at offset
 * 0x3F gives {length, offset/2}; a length of 24 bytes means a full set
 * of LTM values is present.  On any read failure, or when no valid
 * descriptor exists, the registers are programmed with zeros.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2460
/* Bring the chip into a known-good running state.
 *
 * Sequence: lite reset (with ~1s poll-timeout), MAC address setup, chip
 * ID/rev capture, USB bulk-in config, LTM init, burst-cap and queue
 * sizing by bus speed, FIFO levels, receive-filter defaults, PHY reset
 * (with ~1s poll-timeout), MAC configuration, then TX/RX enable.
 *
 * Returns 0 on success, -EIO if a reset bit fails to self-clear.
 * NOTE(review): individual register read/write statuses are discarded.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* wait for the lite-reset bit to self-clear */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size the RX URBs / queue depths according to the USB bus speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB bulk transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes (in units of 512 bytes, one block reserved) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait for PHY reset to complete and the chip to report ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 clears the GMII enable bit here */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the TX path: MAC transmitter, then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	/* enable the RX path: MAC receiver, then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2599
/* Initialize the rollover thresholds used by the statistics code and
 * request an initial stats snapshot.
 *
 * Most hardware counters are 20 bits wide, so every field defaults to
 * 0xFFFFF; the byte counters and EEE LPI counters are full 32-bit and
 * are overridden to 0xFFFFFFFF below.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	/* have the deferred work fetch a first snapshot */
	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
2625
/* ndo_open handler: bring the interface up.
 *
 * Takes an autopm reference for the duration of the setup, starts the
 * PHY, submits the interrupt URB, seeds the statistics machinery and
 * kicks a deferred link reset.  Note the autopm reference is released
 * at 'done' even on success; runtime PM is re-acquired per-transfer.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* let the deferred work bring the link up */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2664
2665 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2666 {
2667 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2668 DECLARE_WAITQUEUE(wait, current);
2669 int temp;
2670
2671
2672 add_wait_queue(&unlink_wakeup, &wait);
2673 set_current_state(TASK_UNINTERRUPTIBLE);
2674 dev->wait = &unlink_wakeup;
2675 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2676
2677
2678 while (!skb_queue_empty(&dev->rxq) &&
2679 !skb_queue_empty(&dev->txq) &&
2680 !skb_queue_empty(&dev->done)) {
2681 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2682 set_current_state(TASK_UNINTERRUPTIBLE);
2683 netif_dbg(dev, ifdown, dev->net,
2684 "waited for %d urb completions\n", temp);
2685 }
2686 set_current_state(TASK_RUNNING);
2687 dev->wait = NULL;
2688 remove_wait_queue(&unlink_wakeup, &wait);
2689 }
2690
/* ndo_stop handler: bring the interface down.
 *
 * Stops the stats timer and PHY, quiesces the queue, drains all URBs,
 * kills the interrupt URB, clears every event flag so the deferred
 * work/tasklet become no-ops, and drops the autopm reference taken in
 * lan78xx_open().
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2727
/* Prepend the 8-byte LAN78xx TX command header to @skb.
 *
 * Ensures TX_OVERHEAD bytes of writable headroom and a linear buffer
 * (the skb is consumed and NULL returned on failure), then builds
 * TX_CMD_A (length, FCS insertion, checksum/LSO flags) and TX_CMD_B
 * (MSS, VLAN tag) and pushes them little-endian in front of the data.
 *
 * Returns the prepared skb, or NULL if it had to be dropped.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;
	void *ptr;

	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* hardware requires a minimum MSS value */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* command words are transmitted little-endian ahead of the frame */
	ptr = skb_push(skb, 8);
	put_unaligned_le32(tx_cmd_a, ptr);
	put_unaligned_le32(tx_cmd_b, ptr + 4);

	return skb;
}
2769
/* Move @skb from @list to dev->done, stamping its new @state, and kick
 * the bottom-half tasklet when the done queue transitions from empty.
 *
 * The irq flags saved when taking list->lock are carried across to the
 * done.lock release (lock-swap without re-enabling interrupts in
 * between).  Returns the skb's previous state so callers can detect a
 * concurrent unlink.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* schedule the tasklet only on the empty->non-empty transition */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2792
/* TX URB completion callback (interrupt context).
 *
 * On success, accounts the aggregated packet/byte counts recorded in
 * the skb's control block.  On failure, updates error stats and reacts
 * per status: -EPIPE defers an endpoint-halt clear to the work queue;
 * disconnect-type errors are ignored; transient link errors stop the
 * queue.  Finally the skb is handed to the done queue for the tasklet.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stall: clear the halt from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2831
2832 static void lan78xx_queue_skb(struct sk_buff_head *list,
2833 struct sk_buff *newsk, enum skb_state state)
2834 {
2835 struct skb_data *entry = (struct skb_data *)newsk->cb;
2836
2837 __skb_queue_tail(list, newsk);
2838 entry->state = state;
2839 }
2840
/* ndo_start_xmit handler: queue a frame for transmission.
 *
 * Timestamps the skb, prepends the TX command header, and appends it
 * to the pending queue; the actual USB submission happens in the
 * bottom-half (lan78xx_tx_bh), which aggregates pending frames.  The
 * netif queue is throttled on sub-SuperSpeed links when the backlog
 * grows.  Always returns NETDEV_TX_OK (drops are accounted, not
 * reported back).
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		/* tx_prep consumed (freed) the skb on failure */
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2870
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints, and record the resulting pipes.
 *
 * Returns 0 on success, -EINVAL when no altsetting provides both bulk
 * endpoints.  The interrupt endpoint is optional (stored, may be NULL).
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* fall through - INT-in is also accepted */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2929
2930 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2931 {
2932 struct lan78xx_priv *pdata = NULL;
2933 int ret;
2934 int i;
2935
2936 ret = lan78xx_get_endpoints(dev, intf);
2937 if (ret) {
2938 netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
2939 ret);
2940 return ret;
2941 }
2942
2943 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2944
2945 pdata = (struct lan78xx_priv *)(dev->data[0]);
2946 if (!pdata) {
2947 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2948 return -ENOMEM;
2949 }
2950
2951 pdata->dev = dev;
2952
2953 spin_lock_init(&pdata->rfe_ctl_lock);
2954 mutex_init(&pdata->dataport_mutex);
2955
2956 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2957
2958 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2959 pdata->vlan_table[i] = 0;
2960
2961 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2962
2963 dev->net->features = 0;
2964
2965 if (DEFAULT_TX_CSUM_ENABLE)
2966 dev->net->features |= NETIF_F_HW_CSUM;
2967
2968 if (DEFAULT_RX_CSUM_ENABLE)
2969 dev->net->features |= NETIF_F_RXCSUM;
2970
2971 if (DEFAULT_TSO_CSUM_ENABLE)
2972 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2973
2974 if (DEFAULT_VLAN_RX_OFFLOAD)
2975 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2976
2977 if (DEFAULT_VLAN_FILTER_ENABLE)
2978 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2979
2980 dev->net->hw_features = dev->net->features;
2981
2982 ret = lan78xx_setup_irq_domain(dev);
2983 if (ret < 0) {
2984 netdev_warn(dev->net,
2985 "lan78xx_setup_irq_domain() failed : %d", ret);
2986 goto out1;
2987 }
2988
2989 dev->net->hard_header_len += TX_OVERHEAD;
2990 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2991
2992
2993 ret = lan78xx_reset(dev);
2994 if (ret) {
2995 netdev_warn(dev->net, "Registers INIT FAILED....");
2996 goto out2;
2997 }
2998
2999 ret = lan78xx_mdio_init(dev);
3000 if (ret) {
3001 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3002 goto out2;
3003 }
3004
3005 dev->net->flags |= IFF_MULTICAST;
3006
3007 pdata->wol = WAKE_MAGIC;
3008
3009 return ret;
3010
3011 out2:
3012 lan78xx_remove_irq_domain(dev);
3013
3014 out1:
3015 netdev_warn(dev->net, "Bind routine FAILED");
3016 cancel_work_sync(&pdata->set_multicast);
3017 cancel_work_sync(&pdata->set_vlan);
3018 kfree(pdata);
3019 return ret;
3020 }
3021
/* Driver unbind: release everything acquired in lan78xx_bind() —
 * IRQ domain, MDIO bus, pending deferred work, and the private data
 * stored in dev->data[0] (cleared afterwards so it cannot be reused).
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}
3039
/* Apply the hardware-computed RX checksum (from the RX command words)
 * to @skb, or mark it CHECKSUM_NONE when offload cannot be trusted:
 * feature disabled, hardware flagged an incomplete checksum, or the
 * frame carries a VLAN tag that will not be stripped (the hardware
 * checksum would then cover the tag bytes too).
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* checksum lives in the upper 16 bits of RX_CMD_B */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3057
3058 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3059 struct sk_buff *skb,
3060 u32 rx_cmd_a, u32 rx_cmd_b)
3061 {
3062 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3063 (rx_cmd_a & RX_CMD_A_FVTG_))
3064 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3065 (rx_cmd_b & 0xffff));
3066 }
3067
/* Deliver a fully-parsed RX frame to the network stack.
 *
 * While RX is paused the skb is parked on rxq_pause instead.  Otherwise
 * stats are updated, the protocol field is resolved, the control block
 * is scrubbed (it held driver state), and the frame goes up via
 * netif_rx() unless the timestamping core defers it.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear stale skb_data state before handing the skb upward */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3094
/* Parse one bulk-in buffer, which may aggregate several ethernet
 * frames.  Each frame is preceded by three little-endian command words
 * (RX_CMD_A/B/C, 10 bytes) and padded so the next header is 4-byte
 * aligned.
 *
 * Frames with the hardware error bit set are skipped.  If the buffer
 * holds exactly one frame, the skb itself is trimmed (dropping the
 * 4-byte FCS) and returned to the caller for delivery; otherwise each
 * frame is cloned out and delivered here, and the walk continues.
 *
 * Returns 1 on success, 0 on a malformed buffer or clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch: reuse the skb itself */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				/* drop the trailing 4-byte FCS */
				skb_trim(skb, skb->len - 4);
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4);
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* skip alignment padding before the next frame header */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3166
/* Process one completed RX buffer from the bottom half.
 *
 * On parse failure or a fully-consumed buffer the skb is pushed back
 * onto the done queue (state already rx_cleanup, set by the caller) so
 * the tasklet frees it; when the parser leaves a single frame in the
 * skb it is delivered directly to the stack.
 */
static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (!lan78xx_rx(dev, skb)) {
		dev->net->stats.rx_errors++;
		goto done;
	}

	if (skb->len) {
		lan78xx_skb_return(dev, skb);
		return;
	}

	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;
done:
	skb_queue_tail(&dev->done, skb);
}
3184
3185 static void rx_complete(struct urb *urb);
3186
/* Allocate an rx_urb_size skb, attach it to @urb and submit the bulk-in
 * transfer.
 *
 * Submission is gated (under rxq.lock) on the device being present,
 * running, not halted and not asleep.  On success the skb is queued on
 * rxq in rx_start state; on any failure both the skb and the urb are
 * freed and a negative errno is returned (-ENOLINK for "stopped" or
 * unreachable-host conditions, which callers use to stop refilling).
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stall: handled from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3246
/* RX URB completion callback (interrupt context).
 *
 * Classifies the completion status: success hands the skb to the done
 * queue in rx_done state for parsing; shutdown/transient errors mark it
 * rx_cleanup and keep the urb attached (setting the local urb pointer
 * to NULL so it is not resubmitted).  When the urb survives and the
 * device is still running, it is immediately resubmitted with a fresh
 * skb via rx_submit().
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt: too short to even hold the RX command words */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* fall through - treat like a shutdown for cleanup */
	case -ECONNRESET:
	case -ESHUTDOWN:
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun: we lost bytes but may still resubmit */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* fall through */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3316
/* TX bottom half: aggregate pending frames into one bulk-out URB.
 *
 * A GSO skb at the head of txq_pend is sent alone; otherwise as many
 * pending frames as fit in MAX_SINGLE_PACKET_SIZE are copied, each
 * padded to a 4-byte boundary, into a single skb.  The URB is then
 * submitted (or parked on dev->deferred while the device is asleep).
 *
 * NOTE(review): the size/count pass over txq_pend drops the queue lock
 * before the frames are dequeued below; new frames queued in between
 * are handled on the next pass, but the lock-free window looks racy —
 * confirm against the upstream driver history.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3444
/* RX bottom half: keep the RX queue filled with submitted URBs.
 *
 * Refills up to 10 URBs per invocation (bounded so the tasklet stays
 * short); -ENOLINK from rx_submit() means the device stopped, so give
 * up.  If the queue is still short, reschedule the tasklet.  Also
 * re-wakes the TX queue when the in-flight TX count drops below the
 * limit.
 */
static void lan78xx_rx_bh(struct lan78xx_net *dev)
{
	struct urb *urb;
	int i;

	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
		for (i = 0; i < 10; i++) {
			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
				break;
			urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (urb)
				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
					return;
		}

		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
			tasklet_schedule(&dev->bh);
	}
	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
		netif_wake_queue(dev->net);
}
3466
/* Bottom-half tasklet: drain the done queue and drive TX/RX work.
 *
 * Each done skb is dispatched by the state recorded in its control
 * block: rx_done frames are parsed/delivered, tx_done and rx_cleanup
 * entries have their urb and skb freed.  Afterwards, if the device is
 * up, pending TX is flushed, the RX ring refilled, and the statistics
 * timer nudged back to its fast (delta == 1) period.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* rx_process re-queues the skb as rx_cleanup */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3510
/* Deferred (process-context) event handler scheduled via
 * lan78xx_defer_kevent(): clears TX/RX endpoint halts, performs link
 * resets and periodic statistics refreshes.
 *
 * Note the unusual goto-into-if style: fail_pipe/fail_halt jump to the
 * netdev_err() *inside* the message-level test, so an autopm failure
 * reuses the same error report path as a clear-halt failure.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the refresh period exponentially, capped at 50x */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3588
3589 static void intr_complete(struct urb *urb)
3590 {
3591 struct lan78xx_net *dev = urb->context;
3592 int status = urb->status;
3593
3594 switch (status) {
3595
3596 case 0:
3597 lan78xx_status(dev, urb);
3598 break;
3599
3600
3601 case -ENOENT:
3602 case -ESHUTDOWN:
3603 netif_dbg(dev, ifdown, dev->net,
3604 "intr shutdown, code %d\n", status);
3605 return;
3606
3607
3608
3609
3610 default:
3611 netdev_dbg(dev->net, "intr status %d\n", status);
3612 break;
3613 }
3614
3615 if (!netif_running(dev->net))
3616 return;
3617
3618 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3619 status = usb_submit_urb(urb, GFP_ATOMIC);
3620 if (status != 0)
3621 netif_err(dev, timer, dev->net,
3622 "intr resubmit --> %d\n", status);
3623 }
3624
/* USB disconnect handler: tear down everything probe set up, in
 * reverse order.  Intfdata is cleared first so concurrent callbacks
 * see the device as gone.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	/* save the phydev pointer: phy_disconnect() below detaches it
	 * from the netdev, but it may still need unregistering
	 */
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* a fixed-link pseudo PHY was registered by this driver and
	 * must be torn down explicitly
	 */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	/* no more deferred events after this point */
	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs that were parked during autosuspend */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3663
3664 static void lan78xx_tx_timeout(struct net_device *net)
3665 {
3666 struct lan78xx_net *dev = netdev_priv(net);
3667
3668 unlink_urbs(dev, &dev->txq);
3669 tasklet_schedule(&dev->bh);
3670 }
3671
3672 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3673 struct net_device *netdev,
3674 netdev_features_t features)
3675 {
3676 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3677 features &= ~NETIF_F_GSO_MASK;
3678
3679 features = vlan_features_check(skb, features);
3680 features = vxlan_features_check(skb, features);
3681
3682 return features;
3683 }
3684
/* net_device operations implemented by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open = lan78xx_open,
	.ndo_stop = lan78xx_stop,
	.ndo_start_xmit = lan78xx_start_xmit,
	.ndo_tx_timeout = lan78xx_tx_timeout,
	.ndo_change_mtu = lan78xx_change_mtu,
	.ndo_set_mac_address = lan78xx_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = lan78xx_ioctl,
	.ndo_set_rx_mode = lan78xx_set_multicast,
	.ndo_set_features = lan78xx_set_features,
	.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
	.ndo_features_check = lan78xx_features_check,
};
3700
/* Statistics poll timer callback: the register reads need sleepable
 * USB transfers, so just flag the event and let the deferred work
 * item do the actual update.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3707
3708 static int lan78xx_probe(struct usb_interface *intf,
3709 const struct usb_device_id *id)
3710 {
3711 struct lan78xx_net *dev;
3712 struct net_device *netdev;
3713 struct usb_device *udev;
3714 int ret;
3715 unsigned maxp;
3716 unsigned period;
3717 u8 *buf = NULL;
3718
3719 udev = interface_to_usbdev(intf);
3720 udev = usb_get_dev(udev);
3721
3722 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3723 if (!netdev) {
3724 dev_err(&intf->dev, "Error: OOM\n");
3725 ret = -ENOMEM;
3726 goto out1;
3727 }
3728
3729
3730 SET_NETDEV_DEV(netdev, &intf->dev);
3731
3732 dev = netdev_priv(netdev);
3733 dev->udev = udev;
3734 dev->intf = intf;
3735 dev->net = netdev;
3736 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3737 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3738
3739 skb_queue_head_init(&dev->rxq);
3740 skb_queue_head_init(&dev->txq);
3741 skb_queue_head_init(&dev->done);
3742 skb_queue_head_init(&dev->rxq_pause);
3743 skb_queue_head_init(&dev->txq_pend);
3744 mutex_init(&dev->phy_mutex);
3745
3746 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3747 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3748 init_usb_anchor(&dev->deferred);
3749
3750 netdev->netdev_ops = &lan78xx_netdev_ops;
3751 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3752 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3753
3754 dev->delta = 1;
3755 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3756
3757 mutex_init(&dev->stats.access_lock);
3758
3759 ret = lan78xx_bind(dev, intf);
3760 if (ret < 0)
3761 goto out2;
3762
3763 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3764 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3765
3766
3767 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3768 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3769
3770 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3771 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3772 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3773
3774 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3775 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3776
3777 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3778 dev->ep_intr->desc.bEndpointAddress &
3779 USB_ENDPOINT_NUMBER_MASK);
3780 period = dev->ep_intr->desc.bInterval;
3781
3782 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3783 buf = kmalloc(maxp, GFP_KERNEL);
3784 if (buf) {
3785 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3786 if (!dev->urb_intr) {
3787 ret = -ENOMEM;
3788 kfree(buf);
3789 goto out3;
3790 } else {
3791 usb_fill_int_urb(dev->urb_intr, dev->udev,
3792 dev->pipe_intr, buf, maxp,
3793 intr_complete, dev, period);
3794 }
3795 }
3796
3797 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3798
3799
3800 intf->needs_remote_wakeup = 1;
3801
3802 ret = lan78xx_phy_init(dev);
3803 if (ret < 0)
3804 goto out4;
3805
3806 ret = register_netdev(netdev);
3807 if (ret != 0) {
3808 netif_err(dev, probe, netdev, "couldn't register the device\n");
3809 goto out5;
3810 }
3811
3812 usb_set_intfdata(intf, dev);
3813
3814 ret = device_set_wakeup_enable(&udev->dev, true);
3815
3816
3817
3818
3819 pm_runtime_set_autosuspend_delay(&udev->dev,
3820 DEFAULT_AUTOSUSPEND_DELAY);
3821
3822 return 0;
3823
3824 out5:
3825 phy_disconnect(netdev->phydev);
3826 out4:
3827 usb_free_urb(dev->urb_intr);
3828 out3:
3829 lan78xx_unbind(dev, intf);
3830 out2:
3831 free_netdev(netdev);
3832 out1:
3833 usb_put_dev(udev);
3834
3835 return ret;
3836 }
3837
/* Compute the 16-bit CRC the wakeup-frame filter hardware expects:
 * polynomial 0x8005, initial value 0xFFFF, data consumed LSB first,
 * and - hardware quirk - the low bit of the CRC is forced to 1 after
 * every polynomial XOR.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	u16 crc = 0xFFFF;
	int i, bit;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		for (bit = 0; bit < 8; bit++) {
			u16 msb = crc >> 15;

			crc <<= 1;
			if (msb ^ (u16)(byte & 1))
				crc = (crc ^ 0x8005) | 0x0001;
			byte >>= 1;
		}
	}

	return crc;
}
3862
/* Arm the wakeup machinery for suspend according to the WAKE_* bits
 * in @wol, then select the matching PMT suspend mode.  As elsewhere
 * in this file, individual register-access results are stored in
 * @ret but not propagated; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* leading bytes of IPv4/IPv6 multicast MACs and the ARP
	 * EtherType, used to build wakeup-frame filters below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake filters */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control/status and acknowledge all wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wakeup-frame filter before rebuilding */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* filter on the IPv4 multicast MAC prefix (3 bytes) */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* filter on the IPv6 multicast MAC prefix (2 bytes) */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* filter on EtherType == ARP (0x0806); WUF_MASK0 of
		 * 0x3000 selects bytes 12-13 of the frame
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set, fall back to suspend mode 0 */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear any latched wakeup status */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wakeup frames can be detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
4005
/* USB suspend handler, called both for runtime (auto) suspend and for
 * system sleep.  Refuses autosuspend while TX is pending, quiesces
 * the MAC and URBs on the first suspend, then arms the appropriate
 * wakeup sources for the suspend type.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	/* NOTE(review): 'event' is captured but not otherwise used here */
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the RX and TX queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend): arm link-up
			 * and wakeup-frame detection
			 */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any latched wakeup status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep the receiver running so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: honour the user-configured WoL mask */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4101
/* USB resume handler: restart the statistics timer and the interrupt
 * URB, resubmit TX URBs that were deferred while asleep, disarm the
 * wakeup machinery and re-enable the transmitter.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URB if the interface is open */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs parked on the deferred anchor during
		 * autosuspend; drop those that fail to submit
		 */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm wake sources and acknowledge pending wake events */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4168
4169 static int lan78xx_reset_resume(struct usb_interface *intf)
4170 {
4171 struct lan78xx_net *dev = usb_get_intfdata(intf);
4172
4173 lan78xx_reset(dev);
4174
4175 phy_start(dev->net->phydev);
4176
4177 return lan78xx_resume(intf);
4178 }
4179
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4196
/* USB driver registration: supports autosuspend and disables
 * hub-initiated LPM
 */
static struct usb_driver lan78xx_driver = {
	.name = DRIVER_NAME,
	.id_table = products,
	.probe = lan78xx_probe,
	.disconnect = lan78xx_disconnect,
	.suspend = lan78xx_suspend,
	.resume = lan78xx_resume,
	.reset_resume = lan78xx_reset_resume,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};
4208
/* module entry/exit boilerplate and metadata */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");