This source file includes the following definitions:
- generate_cookie
- parse_cookie
- index_to_pioqueue_base
- pio_txqueue_offset
- pio_rxqueue_offset
- b43_setup_pioqueue_tx
- b43_setup_pioqueue_rx
- b43_pio_cancel_tx_packets
- b43_destroy_pioqueue_tx
- b43_destroy_pioqueue_rx
- b43_pio_free
- b43_pio_init
- select_queue_by_priority
- tx_write_2byte_queue
- pio_tx_frame_2byte_queue
- tx_write_4byte_queue
- pio_tx_frame_4byte_queue
- pio_tx_frame
- b43_pio_tx
- b43_pio_handle_txstatus
- pio_rx_frame
- b43_pio_rx
- b43_pio_tx_suspend_queue
- b43_pio_tx_resume_queue
- b43_pio_tx_suspend
- b43_pio_tx_resume
#include "b43.h"
#include "pio.h"
#include "dma.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

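/*
 * Build the 16-bit cookie that identifies a queued TX packet.
 * The queue index (plus one, so that zero stays invalid) is stored in
 * the upper 4 bits, the packet slot index in the lower 12 bits.
 */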
static u16 generate_cookie(struct b43_pio_txqueue *q,
                           struct b43_pio_txpacket *pack)
{
        u16 cookie;

        cookie = (((u16)q->index + 1) << 12);
        cookie |= pack->index;

        return cookie;
}

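/*
 * Map the cookie from a TX status report back to the TX queue and
 * packet slot it belongs to. Returns NULL (with a warning) if the
 * cookie does not describe a valid queue or slot.
 */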
static
struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
                                     u16 cookie,
                                     struct b43_pio_txpacket **pack)
{
        struct b43_pio *pio = &dev->pio;
        struct b43_pio_txqueue *q = NULL;
        unsigned int pack_index;

        switch (cookie & 0xF000) {
        case 0x1000:
                q = pio->tx_queue_AC_BK;
                break;
        case 0x2000:
                q = pio->tx_queue_AC_BE;
                break;
        case 0x3000:
                q = pio->tx_queue_AC_VI;
                break;
        case 0x4000:
                q = pio->tx_queue_AC_VO;
                break;
        case 0x5000:
                q = pio->tx_queue_mcast;
                break;
        }
        if (B43_WARN_ON(!q))
                return NULL;
        pack_index = (cookie & 0x0FFF);
        if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
                return NULL;
        *pack = &q->packets[pack_index];

        return q;
}

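/* Look up the MMIO base address of a PIO engine by its index. */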
static u16 index_to_pioqueue_base(struct b43_wldev *dev,
                                  unsigned int index)
{
        static const u16 bases[] = {
                B43_MMIO_PIO_BASE0,
                B43_MMIO_PIO_BASE1,
                B43_MMIO_PIO_BASE2,
                B43_MMIO_PIO_BASE3,
                B43_MMIO_PIO_BASE4,
                B43_MMIO_PIO_BASE5,
                B43_MMIO_PIO_BASE6,
                B43_MMIO_PIO_BASE7,
        };
        static const u16 bases_rev11[] = {
                B43_MMIO_PIO11_BASE0,
                B43_MMIO_PIO11_BASE1,
                B43_MMIO_PIO11_BASE2,
                B43_MMIO_PIO11_BASE3,
                B43_MMIO_PIO11_BASE4,
                B43_MMIO_PIO11_BASE5,
        };

        if (dev->dev->core_rev >= 11) {
                B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
                return bases_rev11[index];
        }
        B43_WARN_ON(index >= ARRAY_SIZE(bases));
        return bases[index];
}

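/*
 * Offsets of the TX and RX register blocks relative to the queue base.
 * Core revisions >= 11 use a different register layout.
 */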
static u16 pio_txqueue_offset(struct b43_wldev *dev)
{
        if (dev->dev->core_rev >= 11)
                return 0x18;
        return 0;
}

static u16 pio_rxqueue_offset(struct b43_wldev *dev)
{
        if (dev->dev->core_rev >= 11)
                return 0x38;
        return 8;
}

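/* Allocate and initialize a PIO TX queue and its packet slots. */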
static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
                                                     unsigned int index)
{
        struct b43_pio_txqueue *q;
        struct b43_pio_txpacket *p;
        unsigned int i;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;
        q->dev = dev;
        q->rev = dev->dev->core_rev;
        q->mmio_base = index_to_pioqueue_base(dev, index) +
                       pio_txqueue_offset(dev);
        q->index = index;

        q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
        if (q->rev >= 8) {
                q->buffer_size = 1920;
        } else {
                q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
                q->buffer_size -= 80;
        }

        INIT_LIST_HEAD(&q->packets_list);
        for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
                p = &(q->packets[i]);
                INIT_LIST_HEAD(&p->list);
                p->index = i;
                p->queue = q;
                list_add(&p->list, &q->packets_list);
        }

        return q;
}

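/* Allocate and initialize a PIO RX queue. */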
static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
                                                     unsigned int index)
{
        struct b43_pio_rxqueue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;
        q->dev = dev;
        q->rev = dev->dev->core_rev;
        q->mmio_base = index_to_pioqueue_base(dev, index) +
                       pio_rxqueue_offset(dev);

        /* Enable Direct FIFO RX (PIO) on the engine. */
        b43_dma_direct_fifo_rx(dev, index, 1);

        return q;
}

static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
{
        struct b43_pio_txpacket *pack;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
                pack = &(q->packets[i]);
                if (pack->skb) {
                        ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
                        pack->skb = NULL;
                }
        }
}

static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
                                    const char *name)
{
        if (!q)
                return;
        b43_pio_cancel_tx_packets(q);
        kfree(q);
}

static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
                                    const char *name)
{
        if (!q)
                return;
        kfree(q);
}

#define destroy_queue_tx(pio, queue) do { \
        b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \
        (pio)->queue = NULL; \
} while (0)

#define destroy_queue_rx(pio, queue) do { \
        b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \
        (pio)->queue = NULL; \
} while (0)

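/* Free all PIO queues of a device. */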
void b43_pio_free(struct b43_wldev *dev)
{
        struct b43_pio *pio;

        if (!b43_using_pio_transfers(dev))
                return;
        pio = &dev->pio;

        destroy_queue_rx(pio, rx_queue);
        destroy_queue_tx(pio, tx_queue_mcast);
        destroy_queue_tx(pio, tx_queue_AC_VO);
        destroy_queue_tx(pio, tx_queue_AC_VI);
        destroy_queue_tx(pio, tx_queue_AC_BE);
        destroy_queue_tx(pio, tx_queue_AC_BK);
}

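/* Allocate and set up all PIO queues: one TX queue per access category,
 * one multicast TX queue and one RX queue. */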
int b43_pio_init(struct b43_wldev *dev)
{
        struct b43_pio *pio = &dev->pio;
        int err = -ENOMEM;

        b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
                    & ~B43_MACCTL_BE);
        b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

        pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
        if (!pio->tx_queue_AC_BK)
                goto out;

        pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
        if (!pio->tx_queue_AC_BE)
                goto err_destroy_bk;

        pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
        if (!pio->tx_queue_AC_VI)
                goto err_destroy_be;

        pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
        if (!pio->tx_queue_AC_VO)
                goto err_destroy_vi;

        pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
        if (!pio->tx_queue_mcast)
                goto err_destroy_vo;

        pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
        if (!pio->rx_queue)
                goto err_destroy_mcast;

        b43dbg(dev->wl, "PIO initialized\n");
        err = 0;
out:
        return err;

err_destroy_mcast:
        destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
        destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
        destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
        destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
        destroy_queue_tx(pio, tx_queue_AC_BK);
        return err;
}

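/* Pick the TX queue that matches a mac80211 queue priority. */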
static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
                                                        u8 queue_prio)
{
        struct b43_pio_txqueue *q;

        if (dev->qos_enabled) {
                /* 0 = highest priority */
                switch (queue_prio) {
                default:
                        B43_WARN_ON(1);
                        /* fall through */
                case 0:
                        q = dev->pio.tx_queue_AC_VO;
                        break;
                case 1:
                        q = dev->pio.tx_queue_AC_VI;
                        break;
                case 2:
                        q = dev->pio.tx_queue_AC_BE;
                        break;
                case 3:
                        q = dev->pio.tx_queue_AC_BK;
                        break;
                }
        } else
                q = dev->pio.tx_queue_AC_BE;

        return q;
}

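/*
 * Write a buffer to a 2-byte (16-bit wide) TX queue.
 * Returns the updated TX control word.
 */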
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
                                u16 ctl,
                                const void *_data,
                                unsigned int data_len)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        const u8 *data = _data;

        ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
        b43_piotx_write16(q, B43_PIO_TXCTL, ctl);

        b43_block_write(dev, data, (data_len & ~1),
                        q->mmio_base + B43_PIO_TXDATA,
                        sizeof(u16));
        if (data_len & 1) {
                u8 *tail = wl->pio_tailspace;
                BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

                /* Write the last byte through the low byte lane only. */
                ctl &= ~B43_PIO_TXCTL_WRITEHI;
                b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
                tail[0] = data[data_len - 1];
                tail[1] = 0;
                b43_block_write(dev, tail, 2,
                                q->mmio_base + B43_PIO_TXDATA,
                                sizeof(u16));
        }

        return ctl;
}

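/* Push one frame (device TX header plus payload) through a 2-byte queue. */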
static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
                                     const u8 *hdr, unsigned int hdrlen)
{
        struct b43_pio_txqueue *q = pack->queue;
        const char *frame = pack->skb->data;
        unsigned int frame_len = pack->skb->len;
        u16 ctl;

        ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
        ctl |= B43_PIO_TXCTL_FREADY;
        ctl &= ~B43_PIO_TXCTL_EOF;

        /* Send the header. */
        ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
        /* Send the frame data. */
        ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);

        ctl |= B43_PIO_TXCTL_EOF;
        b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}

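/*
 * Write a buffer to a 4-byte (32-bit wide) TX queue, as used by core
 * revisions >= 8. Returns the updated TX control word.
 */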
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
                                u32 ctl,
                                const void *_data,
                                unsigned int data_len)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        const u8 *data = _data;

        ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
               B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
        b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);

        b43_block_write(dev, data, (data_len & ~3),
                        q->mmio_base + B43_PIO8_TXDATA,
                        sizeof(u32));
        if (data_len & 3) {
                u8 *tail = wl->pio_tailspace;
                BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

                memset(tail, 0, 4);
                /* Write the remaining 1 to 3 bytes, enabling only the
                 * byte lanes that actually carry data. */
                ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
                         B43_PIO8_TXCTL_24_31);
                switch (data_len & 3) {
                case 3:
                        ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
                        tail[0] = data[data_len - 3];
                        tail[1] = data[data_len - 2];
                        tail[2] = data[data_len - 1];
                        break;
                case 2:
                        ctl |= B43_PIO8_TXCTL_8_15;
                        tail[0] = data[data_len - 2];
                        tail[1] = data[data_len - 1];
                        break;
                case 1:
                        tail[0] = data[data_len - 1];
                        break;
                }
                b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
                b43_block_write(dev, tail, 4,
                                q->mmio_base + B43_PIO8_TXDATA,
                                sizeof(u32));
        }

        return ctl;
}

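/* Push one frame (device TX header plus payload) through a 4-byte queue. */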
static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
                                     const u8 *hdr, unsigned int hdrlen)
{
        struct b43_pio_txqueue *q = pack->queue;
        const char *frame = pack->skb->data;
        unsigned int frame_len = pack->skb->len;
        u32 ctl;

        ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
        ctl |= B43_PIO8_TXCTL_FREADY;
        ctl &= ~B43_PIO8_TXCTL_EOF;

        /* Send the header. */
        ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
        /* Send the frame data. */
        ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);

        ctl |= B43_PIO8_TXCTL_EOF;
        b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
}

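/*
 * Transmit one frame on the given queue: build the device TX header,
 * push header and payload to the hardware and account for the consumed
 * packet slot and buffer space.
 */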
static int pio_tx_frame(struct b43_pio_txqueue *q,
                        struct sk_buff *skb)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        struct b43_pio_txpacket *pack;
        u16 cookie;
        int err;
        unsigned int hdrlen;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;

        B43_WARN_ON(list_empty(&q->packets_list));
        pack = list_entry(q->packets_list.next,
                          struct b43_pio_txpacket, list);

        cookie = generate_cookie(q, pack);
        hdrlen = b43_txhdr_size(dev);
        BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
        B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
        err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
                                 info, cookie);
        if (err)
                return err;

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* Tell the firmware about the cookie of the last
                 * multicast frame, so it can clear the more-data bit
                 * in it. */
                b43_shm_write16(dev, B43_SHM_SHARED,
                                B43_SHM_SH_MCASTCOOKIE, cookie);
        }

        pack->skb = skb;
        if (q->rev >= 8)
                pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
        else
                pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);

        /* Remove the packet from the list of free slots. It is put
         * back when the TX status report for it arrives. */
        list_del(&pack->list);

        /* Account for the used queue buffer space. */
        q->buffer_used += roundup(skb->len + hdrlen, 4);
        q->free_packet_slots -= 1;

        return 0;
}

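/* Queue one frame for PIO transmission. */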
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
        struct b43_pio_txqueue *q;
        struct ieee80211_hdr *hdr;
        unsigned int hdrlen, total_len;
        int err = 0;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        hdr = (struct ieee80211_hdr *)skb->data;

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* The multicast queue is sent after the DTIM. */
                q = dev->pio.tx_queue_mcast;
                /* Set the more-data bit. The firmware will clear it
                 * on the last frame of the queue. */
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        } else {
                /* Decide by priority where to put this frame. */
                q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
        }

        hdrlen = b43_txhdr_size(dev);
        total_len = roundup(skb->len + hdrlen, 4);

        if (unlikely(total_len > q->buffer_size)) {
                err = -ENOBUFS;
                b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
                goto out;
        }
        if (unlikely(q->free_packet_slots == 0)) {
                err = -ENOBUFS;
                b43warn(dev->wl, "PIO: TX packet overflow.\n");
                goto out;
        }
        B43_WARN_ON(q->buffer_used > q->buffer_size);

        if (total_len > (q->buffer_size - q->buffer_used)) {
                /* Not enough buffer space. Stop the queue and retry
                 * later. */
                err = -EBUSY;
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
                goto out;
        }

        /* Remember the mac80211 queue priority, so we know which
         * mac80211 queue to wake when this one is unstopped. */
        q->queue_prio = skb_get_queue_mapping(skb);

        err = pio_tx_frame(q, skb);
        if (unlikely(err == -ENOKEY)) {
                /* Drop this packet, as we have no key that could
                 * encrypt it. */
                ieee80211_free_txskb(dev->wl->hw, skb);
                err = 0;
                goto out;
        }
        if (unlikely(err)) {
                b43err(dev->wl, "PIO transmission failure\n");
                goto out;
        }

        B43_WARN_ON(q->buffer_used > q->buffer_size);
        if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
            (q->free_packet_slots == 0)) {
                /* The queue is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
        }

out:
        return err;
}

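/* Handle a TX status report: give the buffer space and the packet slot
 * back to the queue and report the status to mac80211. */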
void b43_pio_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
        struct b43_pio_txqueue *q;
        struct b43_pio_txpacket *pack = NULL;
        unsigned int total_len;
        struct ieee80211_tx_info *info;

        q = parse_cookie(dev, status->cookie, &pack);
        if (unlikely(!q))
                return;
        B43_WARN_ON(!pack);

        info = IEEE80211_SKB_CB(pack->skb);

        b43_fill_txstatus_report(dev, info, status);

        /* Release the buffer space and the packet slot. */
        total_len = pack->skb->len + b43_txhdr_size(dev);
        total_len = roundup(total_len, 4);
        q->buffer_used -= total_len;
        q->free_packet_slots += 1;

        ieee80211_tx_status(dev->wl->hw, pack->skb);
        pack->skb = NULL;
        list_add(&pack->list, &q->packets_list);

        if (q->stopped) {
                ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
                q->stopped = false;
        }
}

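/*
 * Receive one frame from the RX queue, if one is pending.
 * Returns false if no frame was ready in the hardware queue; returns
 * true otherwise, even if the frame had to be discarded.
 */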
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        u16 len;
        u32 macstat = 0;
        unsigned int i, padding;
        struct sk_buff *skb;
        const char *err_msg = NULL;
        struct b43_rxhdr_fw4 *rxhdr =
                (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
        size_t rxhdr_size = sizeof(*rxhdr);

        BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
        switch (dev->fw.hdr_format) {
        case B43_FW_HDR_410:
        case B43_FW_HDR_351:
                rxhdr_size -= sizeof(rxhdr->format_598) -
                              sizeof(rxhdr->format_351);
                break;
        case B43_FW_HDR_598:
                break;
        }
        memset(rxhdr, 0, rxhdr_size);

        /* Check if a frame is ready and wait for its data to become
         * available. */
        if (q->rev >= 8) {
                u32 ctl;

                ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
                if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
                        return false;
                b43_piorx_write32(q, B43_PIO8_RXCTL,
                                  B43_PIO8_RXCTL_FRAMERDY);
                for (i = 0; i < 10; i++) {
                        ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
                        if (ctl & B43_PIO8_RXCTL_DATARDY)
                                goto data_ready;
                        udelay(10);
                }
        } else {
                u16 ctl;

                ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
                if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
                        return false;
                b43_piorx_write16(q, B43_PIO_RXCTL,
                                  B43_PIO_RXCTL_FRAMERDY);
                for (i = 0; i < 10; i++) {
                        ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
                        if (ctl & B43_PIO_RXCTL_DATARDY)
                                goto data_ready;
                        udelay(10);
                }
        }
        b43dbg(q->dev->wl, "PIO RX timed out\n");
        return true;
data_ready:

        /* Read the RX header (preamble). */
        if (q->rev >= 8) {
                b43_block_read(dev, rxhdr, rxhdr_size,
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
        } else {
                b43_block_read(dev, rxhdr, rxhdr_size,
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
        }

        len = le16_to_cpu(rxhdr->frame_len);
        if (unlikely(len > 0x700)) {
                err_msg = "len > 0x700";
                goto rx_error;
        }
        if (unlikely(len == 0)) {
                err_msg = "len == 0";
                goto rx_error;
        }

        switch (dev->fw.hdr_format) {
        case B43_FW_HDR_598:
                macstat = le32_to_cpu(rxhdr->format_598.mac_status);
                break;
        case B43_FW_HDR_410:
        case B43_FW_HDR_351:
                macstat = le32_to_cpu(rxhdr->format_351.mac_status);
                break;
        }
        if (macstat & B43_RX_MAC_FCSERR) {
                if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
                        /* Drop frames with a bad FCS, unless mac80211
                         * asked for them to be passed up. */
                        err_msg = "Frame FCS error";
                        goto rx_error;
                }
        }

        /* Allocate the skb, leaving room for the 2-byte alignment
         * padding (if flagged by the hardware) plus 2 bytes of
         * headroom. */
        padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
        skb = dev_alloc_skb(len + padding + 2);
        if (unlikely(!skb)) {
                err_msg = "Out of memory";
                goto rx_error;
        }
        skb_reserve(skb, 2);
        skb_put(skb, len + padding);
        if (q->rev >= 8) {
                b43_block_read(dev, skb->data + padding, (len & ~3),
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
                if (len & 3) {
                        u8 *tail = wl->pio_tailspace;
                        BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

                        /* Read the last few bytes. */
                        b43_block_read(dev, tail, 4,
                                       q->mmio_base + B43_PIO8_RXDATA,
                                       sizeof(u32));
                        switch (len & 3) {
                        case 3:
                                skb->data[len + padding - 3] = tail[0];
                                skb->data[len + padding - 2] = tail[1];
                                skb->data[len + padding - 1] = tail[2];
                                break;
                        case 2:
                                skb->data[len + padding - 2] = tail[0];
                                skb->data[len + padding - 1] = tail[1];
                                break;
                        case 1:
                                skb->data[len + padding - 1] = tail[0];
                                break;
                        }
                }
        } else {
                b43_block_read(dev, skb->data + padding, (len & ~1),
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
                if (len & 1) {
                        u8 *tail = wl->pio_tailspace;
                        BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

                        /* Read the last byte. */
                        b43_block_read(dev, tail, 2,
                                       q->mmio_base + B43_PIO_RXDATA,
                                       sizeof(u16));
                        skb->data[len + padding - 1] = tail[0];
                }
        }

        b43_rx(q->dev, skb, rxhdr);

        return true;

rx_error:
        if (err_msg)
                b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
        if (q->rev >= 8)
                b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
        else
                b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

        return true;
}

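/* Drain the RX queue. The loop is capped at 10000 frames per call as a
 * safety limit. */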
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
        unsigned int count = 0;
        bool stop;

        while (1) {
                stop = (pio_rx_frame(q) == 0);
                if (stop)
                        break;
                cond_resched();
                if (WARN_ON_ONCE(++count > 10000))
                        break;
        }
}

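/* Per-queue helpers: set or clear the suspend-request bit in the TX
 * control register. */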
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
        if (q->rev >= 8) {
                b43_piotx_write32(q, B43_PIO8_TXCTL,
                                  b43_piotx_read32(q, B43_PIO8_TXCTL)
                                  | B43_PIO8_TXCTL_SUSPREQ);
        } else {
                b43_piotx_write16(q, B43_PIO_TXCTL,
                                  b43_piotx_read16(q, B43_PIO_TXCTL)
                                  | B43_PIO_TXCTL_SUSPREQ);
        }
}

static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
        if (q->rev >= 8) {
                b43_piotx_write32(q, B43_PIO8_TXCTL,
                                  b43_piotx_read32(q, B43_PIO8_TXCTL)
                                  & ~B43_PIO8_TXCTL_SUSPREQ);
        } else {
                b43_piotx_write16(q, B43_PIO_TXCTL,
                                  b43_piotx_read16(q, B43_PIO_TXCTL)
                                  & ~B43_PIO_TXCTL_SUSPREQ);
        }
}

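/* Suspend/resume TX on all queues. The device is kept awake while TX is
 * suspended. */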
void b43_pio_tx_suspend(struct b43_wldev *dev)
{
        b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}

void b43_pio_tx_resume(struct b43_wldev *dev)
{
        b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
        b43_power_saving_ctl_bits(dev, 0);
}