This source file includes the following definitions:
- qca_tty_receive
- qcauart_transmit
- qca_tty_wakeup
- qcauart_netdev_open
- qcauart_netdev_close
- qcauart_netdev_xmit
- qcauart_netdev_tx_timeout
- qcauart_netdev_init
- qcauart_netdev_uninit
- qcauart_netdev_setup
- qca_uart_probe
- qca_uart_remove
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25 #include <linux/device.h>
26 #include <linux/errno.h>
27 #include <linux/etherdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_ether.h>
30 #include <linux/jiffies.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/netdevice.h>
34 #include <linux/of.h>
35 #include <linux/of_device.h>
36 #include <linux/of_net.h>
37 #include <linux/sched.h>
38 #include <linux/serdev.h>
39 #include <linux/skbuff.h>
40 #include <linux/types.h>
41
42 #include "qca_7k_common.h"
43
44 #define QCAUART_DRV_VERSION "0.1.0"
45 #define QCAUART_DRV_NAME "qcauart"
46 #define QCAUART_TX_TIMEOUT (1 * HZ)
47
/* Per-interface driver state, stored as netdev_priv() of net_dev. */
struct qcauart {
	struct net_device *net_dev;	/* backing network interface */
	spinlock_t lock;		/* guards tx_head/tx_left/tx_buffer */
	struct work_struct tx_work;	/* drains tx_buffer into the serdev */

	struct serdev_device *serdev;	/* underlying serial device */
	struct qcafrm_handle frm_handle;	/* RX de-framing state machine */
	struct sk_buff *rx_skb;		/* skb currently being filled by RX */

	unsigned char *tx_head;		/* next unsent byte within tx_buffer */
	int tx_left;			/* number of bytes not yet written out */
	unsigned char *tx_buffer;	/* one framed TX packet (header+data+footer) */
};
61
/* serdev receive callback: feed incoming bytes through the QCA7000
 * framing state machine and pass each completed frame to the network
 * stack. Returns the number of bytes consumed; anything not consumed
 * is redelivered by the serdev core later.
 */
static int
qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
		size_t count)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);
	struct net_device *netdev = qca->net_dev;
	struct net_device_stats *n_stats = &netdev->stats;
	size_t i;

	if (!qca->rx_skb) {
		/* A previous allocation failed; retry now. On failure
		 * consume nothing so the serdev core retries this data.
		 */
		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
							netdev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			return 0;
		}
	}

	for (i = 0; i < count; i++) {
		s32 retcode;

		retcode = qcafrm_fsm_decode(&qca->frm_handle,
					    qca->rx_skb->data,
					    skb_tailroom(qca->rx_skb),
					    data[i]);

		switch (retcode) {
		case QCAFRM_GATHER:
		case QCAFRM_NOHEAD:
			/* Frame still incomplete - keep feeding bytes. */
			break;
		case QCAFRM_NOTAIL:
			netdev_dbg(netdev, "recv: no RX tail\n");
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			break;
		case QCAFRM_INVLEN:
			netdev_dbg(netdev, "recv: invalid RX length\n");
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			break;
		default:
			/* Positive return is the length of a complete frame
			 * the FSM already copied into rx_skb->data.
			 */
			n_stats->rx_packets++;
			n_stats->rx_bytes += retcode;
			skb_put(qca->rx_skb, retcode);
			qca->rx_skb->protocol = eth_type_trans(
						qca->rx_skb, qca->rx_skb->dev);
			qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
			netif_rx_ni(qca->rx_skb);
			/* Prepare the buffer for the next frame. */
			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
								netdev->mtu +
								VLAN_ETH_HLEN);
			if (!qca->rx_skb) {
				netdev_dbg(netdev, "recv: out of RX resources\n");
				n_stats->rx_errors++;
				return i;
			}
		}
	}

	return i;
}
125
126
/* Deferred TX worker: pushes the remainder of tx_buffer out through the
 * serdev. Rescheduled via the write_wakeup callback until the whole
 * frame is written, then re-enables the TX queue.
 */
static void qcauart_transmit(struct work_struct *work)
{
	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	int written;

	spin_lock_bh(&qca->lock);

	/* Interface may have been brought down since this was scheduled. */
	if (!netif_running(qca->net_dev)) {
		spin_unlock_bh(&qca->lock);
		return;
	}

	if (qca->tx_left <= 0) {
		/* Whole frame has been written: count the packet and let
		 * the stack queue the next one.
		 */
		n_stats->tx_packets++;
		spin_unlock_bh(&qca->lock);
		netif_wake_queue(qca->net_dev);
		return;
	}

	/* Partial writes are fine - we keep state and get woken again. */
	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
					  qca->tx_left);
	if (written > 0) {
		qca->tx_left -= written;
		qca->tx_head += written;
	}
	spin_unlock_bh(&qca->lock);
}
159
160
161
162
/* serdev write_wakeup callback: the UART can accept more data, so
 * defer the actual write to process context via the TX work item.
 */
static void qca_tty_wakeup(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	schedule_work(&qca->tx_work);
}
169
170 static struct serdev_device_ops qca_serdev_ops = {
171 .receive_buf = qca_tty_receive,
172 .write_wakeup = qca_tty_wakeup,
173 };
174
/* ndo_open: there is no link negotiation on the serial side, so just
 * let the core start handing us frames. Always succeeds.
 */
static int qcauart_netdev_open(struct net_device *dev)
{
	netif_start_queue(dev);

	return 0;
}
183
/* ndo_stop: stop the TX queue, wait for any in-flight TX work, then
 * discard whatever is left of the current TX buffer.
 */
static int qcauart_netdev_close(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_stop_queue(dev);
	flush_work(&qca->tx_work);

	spin_lock_bh(&qca->lock);
	qca->tx_left = 0;	/* drop the partially sent frame, if any */
	spin_unlock_bh(&qca->lock);

	return 0;
}
197
/* ndo_start_xmit: serialize the skb into tx_buffer (QCA7000 header,
 * payload, zero padding up to the minimum frame length, footer), kick
 * off the serdev write and stop the queue until qcauart_transmit() has
 * drained the whole frame. Always consumes the skb and returns
 * NETDEV_TX_OK.
 */
static netdev_tx_t
qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *n_stats = &dev->stats;
	struct qcauart *qca = netdev_priv(dev);
	u8 pad_len = 0;
	int written;
	u8 *pos;

	spin_lock(&qca->lock);

	/* The queue is only woken after the previous frame fully drained,
	 * so leftover TX data here would indicate a logic error.
	 */
	WARN_ON(qca->tx_left);

	if (!netif_running(dev)) {
		spin_unlock(&qca->lock);
		netdev_warn(qca->net_dev, "xmit: iface is down\n");
		goto out;
	}

	pos = qca->tx_buffer;

	/* Frames shorter than QCAFRM_MIN_LEN are padded with zeroes. */
	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	pos += qcafrm_create_header(pos, skb->len + pad_len);

	memcpy(pos, skb->data, skb->len);
	pos += skb->len;

	if (pad_len) {
		memset(pos, 0, pad_len);
		pos += pad_len;
	}

	pos += qcafrm_create_footer(pos);

	/* Hold off further frames until this one is fully written. */
	netif_stop_queue(qca->net_dev);

	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
					  pos - qca->tx_buffer);
	if (written > 0) {
		/* Any remainder is pushed out later by qcauart_transmit(). */
		qca->tx_left = (pos - qca->tx_buffer) - written;
		qca->tx_head = qca->tx_buffer + written;
		n_stats->tx_bytes += written;
	}
	spin_unlock(&qca->lock);

	netif_trans_update(dev);
out:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
250
251 static void qcauart_netdev_tx_timeout(struct net_device *dev)
252 {
253 struct qcauart *qca = netdev_priv(dev);
254
255 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
256 jiffies, dev_trans_start(dev));
257 dev->stats.tx_errors++;
258 dev->stats.tx_dropped++;
259 }
260
/* ndo_init: called from register_netdev(). Finalize link parameters and
 * allocate the devm-managed TX staging buffer plus the first RX skb.
 */
static int qcauart_netdev_init(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);
	size_t len;

	/* Finish setting up the device info. */
	dev->mtu = QCAFRM_MAX_MTU;
	dev->type = ARPHRD_ETHER;

	/* Worst case: a maximum-size frame plus framing overhead. */
	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
	if (!qca->tx_buffer)
		return -ENOMEM;

	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
						qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb)
		return -ENOBUFS;

	return 0;
}
283
/* ndo_uninit: drop the RX skb left behind by qcauart_netdev_init() or
 * qca_tty_receive(); the TX buffer is devm-managed and needs no action.
 */
static void qcauart_netdev_uninit(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	dev_kfree_skb(qca->rx_skb);
}
290
/* net_device callbacks for the qcauart interface. */
static const struct net_device_ops qcauart_netdev_ops = {
	.ndo_init = qcauart_netdev_init,
	.ndo_uninit = qcauart_netdev_uninit,
	.ndo_open = qcauart_netdev_open,
	.ndo_stop = qcauart_netdev_close,
	.ndo_start_xmit = qcauart_netdev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};
301
302 static void qcauart_netdev_setup(struct net_device *dev)
303 {
304 dev->netdev_ops = &qcauart_netdev_ops;
305 dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
306 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
307 dev->tx_queue_len = 100;
308
309
310 dev->min_mtu = QCAFRM_MIN_MTU;
311 dev->max_mtu = QCAFRM_MAX_MTU;
312 }
313
/* Devicetree match table: binds to "qca,qca7000" serdev nodes. */
static const struct of_device_id qca_uart_of_match[] = {
	{
	 .compatible = "qca,qca7000",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qca_uart_of_match);
321
/* Bind a serdev node: allocate the net_device, read DT properties
 * ("current-speed", MAC address), open and configure the UART, then
 * register the network interface. Returns 0 or a negative errno.
 */
static int qca_uart_probe(struct serdev_device *serdev)
{
	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
	struct qcauart *qca;
	const char *mac;
	u32 speed = 115200;	/* default baudrate if DT omits current-speed */
	int ret;

	if (!qcauart_dev)
		return -ENOMEM;

	qcauart_netdev_setup(qcauart_dev);
	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);

	qca = netdev_priv(qcauart_dev);
	/* NOTE(review): netdev_priv() cannot return NULL for a successfully
	 * allocated netdev, so this check looks unreachable - confirm before
	 * relying on it.
	 */
	if (!qca) {
		pr_err("qca_uart: Fail to retrieve private structure\n");
		ret = -ENOMEM;
		goto free;
	}
	qca->net_dev = qcauart_dev;
	qca->serdev = serdev;
	qcafrm_fsm_init_uart(&qca->frm_handle);

	spin_lock_init(&qca->lock);
	INIT_WORK(&qca->tx_work, qcauart_transmit);

	/* Optional property; 'speed' keeps its default on absence/error. */
	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);

	mac = of_get_mac_address(serdev->dev.of_node);

	if (!IS_ERR(mac))
		ether_addr_copy(qca->net_dev->dev_addr, mac);

	/* Fall back to a random MAC when DT gives none (or an invalid one). */
	if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
		eth_hw_addr_random(qca->net_dev);
		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
			 qca->net_dev->dev_addr);
	}

	netif_carrier_on(qca->net_dev);
	serdev_device_set_drvdata(serdev, qca);
	serdev_device_set_client_ops(serdev, &qca_serdev_ops);

	ret = serdev_device_open(serdev);
	if (ret) {
		dev_err(&serdev->dev, "Unable to open device %s\n",
			qcauart_dev->name);
		goto free;
	}

	speed = serdev_device_set_baudrate(serdev, speed);
	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);

	serdev_device_set_flow_control(serdev, false);

	ret = register_netdev(qcauart_dev);
	if (ret) {
		dev_err(&serdev->dev, "Unable to register net device %s\n",
			qcauart_dev->name);
		serdev_device_close(serdev);
		cancel_work_sync(&qca->tx_work);
		goto free;
	}

	return 0;

free:
	free_netdev(qcauart_dev);
	return ret;
}
393
/* Unbind: tear down in reverse probe order - unregister the netdev
 * (stops traffic via ndo_stop), close the serdev, and make sure no TX
 * work is still pending before the memory is freed.
 */
static void qca_uart_remove(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	unregister_netdev(qca->net_dev);

	/* Flush any pending characters in the driver. */
	serdev_device_close(serdev);
	cancel_work_sync(&qca->tx_work);

	free_netdev(qca->net_dev);
}
406
/* serdev driver glue; module init/exit are generated by the macro below. */
static struct serdev_device_driver qca_uart_driver = {
	.probe = qca_uart_probe,
	.remove = qca_uart_remove,
	.driver = {
		.name = QCAUART_DRV_NAME,
		.of_match_table = of_match_ptr(qca_uart_of_match),
	},
};

module_serdev_device_driver(qca_uart_driver);
417
418 MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
419 MODULE_AUTHOR("Qualcomm Atheros Communications");
420 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
421 MODULE_LICENSE("Dual BSD/GPL");
422 MODULE_VERSION(QCAUART_DRV_VERSION);