This source file includes following definitions.
- ath5k_hw_start_rx_dma
- ath5k_hw_stop_rx_dma
- ath5k_hw_get_rxdp
- ath5k_hw_set_rxdp
- ath5k_hw_start_tx_dma
- ath5k_hw_stop_tx_dma
- ath5k_hw_stop_beacon_queue
- ath5k_hw_get_txdp
- ath5k_hw_set_txdp
- ath5k_hw_update_tx_triglevel
- ath5k_hw_is_intr_pending
- ath5k_hw_get_isr
- ath5k_hw_set_imr
- ath5k_hw_dma_init
- ath5k_hw_dma_stop
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include "ath5k.h"
35 #include "reg.h"
36 #include "debug.h"
37
38
39
40
41
42
43
44
45
46
47 void
48 ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
49 {
50 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
51 ath5k_hw_reg_read(ah, AR5K_CR);
52 }
53
54
55
56
57
58 static int
59 ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
60 {
61 unsigned int i;
62
63 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
64
65
66
67
68 for (i = 1000; i > 0 &&
69 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
70 i--)
71 udelay(100);
72
73 if (!i)
74 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
75 "failed to stop RX DMA !\n");
76
77 return i ? 0 : -EBUSY;
78 }
79
80
81
82
83
84 u32
85 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
86 {
87 return ath5k_hw_reg_read(ah, AR5K_RXDP);
88 }
89
90
91
92
93
94
95
96
97 int
98 ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
99 {
100 if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
101 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
102 "tried to set RXDP while rx was active !\n");
103 return -EIO;
104 }
105
106 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
107 return 0;
108 }
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * On pre-QCU hardware (AR5210) the queue type selects which TXE/TXD
 * bit pair of the control register is touched; QCU hardware simply
 * sets the per-queue TXE bit.
 *
 * Returns 0 on success, -EINVAL for an inactive/unknown queue type,
 * or -EIO if the queue is still flagged as disabled (QCU only).
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Pre-QCU: set the queue's enable bit by type.
		 * Beacon/CAB queues additionally program the beacon
		 * control register (written via AR5K_BSR here).
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
				AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Start queue and flush the write */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Refuse to start while a disable request is pending */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue via the per-queue TXE bit */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
177
178
179
180
181
182
183
184
185
186
187 static int
188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
189 {
190 unsigned int i = 40;
191 u32 tx_queue, pending;
192
193 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
194
195
196 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
197 return -EINVAL;
198
199 if (ah->ah_version == AR5K_AR5210) {
200 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
201
202
203
204
205 switch (ah->ah_txq[queue].tqi_type) {
206 case AR5K_TX_QUEUE_DATA:
207 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
208 break;
209 case AR5K_TX_QUEUE_BEACON:
210 case AR5K_TX_QUEUE_CAB:
211
212 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
213 ath5k_hw_reg_write(ah, 0, AR5K_BSR);
214 break;
215 default:
216 return -EINVAL;
217 }
218
219
220 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
221 ath5k_hw_reg_read(ah, AR5K_CR);
222 } else {
223
224
225
226
227
228 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
229 AR5K_QCU_MISC_DCU_EARLY);
230
231
232
233
234 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
235
236
237 for (i = 1000; i > 0 &&
238 (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
239 i--)
240 udelay(100);
241
242 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
243 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
244 "queue %i didn't stop !\n", queue);
245
246
247 i = 1000;
248 do {
249 pending = ath5k_hw_reg_read(ah,
250 AR5K_QUEUE_STATUS(queue)) &
251 AR5K_QCU_STS_FRMPENDCNT;
252 udelay(100);
253 } while (--i && pending);
254
255
256
257 if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
258 pending) {
259
260 ath5k_hw_reg_write(ah,
261 AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
262 AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
263 AR5K_QUIET_CTL2);
264
265
266 ath5k_hw_reg_write(ah,
267 AR5K_QUIET_CTL1_QT_EN |
268 AR5K_REG_SM(ath5k_hw_reg_read(ah,
269 AR5K_TSF_L32_5211) >> 10,
270 AR5K_QUIET_CTL1_NEXT_QT_TSF),
271 AR5K_QUIET_CTL1);
272
273
274 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
275 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
276
277
278 udelay(400);
279 AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
280 AR5K_QUIET_CTL1_QT_EN);
281
282
283 i = 100;
284 do {
285 pending = ath5k_hw_reg_read(ah,
286 AR5K_QUEUE_STATUS(queue)) &
287 AR5K_QCU_STS_FRMPENDCNT;
288 udelay(100);
289 } while (--i && pending);
290
291 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
292 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
293
294 if (pending)
295 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
296 "quiet mechanism didn't work q:%i !\n",
297 queue);
298 }
299
300
301
302
303 AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
304 AR5K_QCU_MISC_DCU_EARLY);
305
306
307 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
308 if (pending) {
309 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
310 "tx dma didn't stop (q:%i, frm:%i) !\n",
311 queue, pending);
312 return -EBUSY;
313 }
314 }
315
316
317 return 0;
318 }
319
320
321
322
323
324
325
326
327 int
328 ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
329 {
330 int ret;
331 ret = ath5k_hw_stop_tx_dma(ah, queue);
332 if (ret) {
333 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
334 "beacon queue didn't stop !\n");
335 return -EIO;
336 }
337 return 0;
338 }
339
340
341
342
343
344
345
346
347
348
349
350
351
352 u32
353 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
354 {
355 u16 tx_reg;
356
357 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
358
359
360
361
362
363 if (ah->ah_version == AR5K_AR5210) {
364 switch (ah->ah_txq[queue].tqi_type) {
365 case AR5K_TX_QUEUE_DATA:
366 tx_reg = AR5K_NOQCU_TXDP0;
367 break;
368 case AR5K_TX_QUEUE_BEACON:
369 case AR5K_TX_QUEUE_CAB:
370 tx_reg = AR5K_NOQCU_TXDP1;
371 break;
372 default:
373 return 0xffffffff;
374 }
375 } else {
376 tx_reg = AR5K_QUEUE_TXDP(queue);
377 }
378
379 return ath5k_hw_reg_read(ah, tx_reg);
380 }
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395 int
396 ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
397 {
398 u16 tx_reg;
399
400 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
401
402
403
404
405
406 if (ah->ah_version == AR5K_AR5210) {
407 switch (ah->ah_txq[queue].tqi_type) {
408 case AR5K_TX_QUEUE_DATA:
409 tx_reg = AR5K_NOQCU_TXDP0;
410 break;
411 case AR5K_TX_QUEUE_BEACON:
412 case AR5K_TX_QUEUE_CAB:
413 tx_reg = AR5K_NOQCU_TXDP1;
414 break;
415 default:
416 return -EINVAL;
417 }
418 } else {
419
420
421
422
423
424 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
425 return -EIO;
426
427 tx_reg = AR5K_QUEUE_TXDP(queue);
428 }
429
430
431 ath5k_hw_reg_write(ah, phys_addr, tx_reg);
432
433 return 0;
434 }
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452 int
453 ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
454 {
455 u32 trigger_level, imr;
456 int ret = -EIO;
457
458
459
460
461 imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
462
463 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
464 AR5K_TXCFG_TXFULL);
465
466 if (!increase) {
467 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
468 goto done;
469 } else
470 trigger_level +=
471 ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
472
473
474
475
476 if (ah->ah_version == AR5K_AR5210)
477 ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
478 else
479 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
480 AR5K_TXCFG_TXFULL, trigger_level);
481
482 ret = 0;
483
484 done:
485
486
487
488 ath5k_hw_set_imr(ah, imr);
489
490 return ret;
491 }
492
493
494
495
496
497
498
499
500
501
502
503
504
505 bool
506 ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
507 {
508 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
509 }
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Out parameter; filled with the pending interrupts
 *                  translated to &enum ath5k_int flags
 *
 * Reads (and thereby acknowledges) the primary interrupt status
 * register, plus the secondary status registers on QCU hardware, and
 * maps the raw bits to driver-level AR5K_INT_* flags. Per-queue TX
 * status bits are accumulated into ah->ah_txq_isr_* fields.
 *
 * Returns 0, or -ENODEV when the status register reads all-ones
 * (card gone).
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data = 0;

	/*
	 * 5210 has a single ISR register; reading it also clears it.
	 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 isr = 0;
		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(isr == AR5K_INT_NOCARD)) {
			/* All-ones read: the card is gone */
			*interrupt_mask = isr;
			return -ENODEV;
		}

		/* Only report interrupts we actually asked for */
		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

		/* Fold the three bus-error conditions into one flag */
		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
						| AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		data = isr;
	} else {
		/*
		 * QCU hardware: primary status (PISR) plus five
		 * secondary status registers (SISR0-4) holding the
		 * per-queue detail bits.
		 */
		u32 pisr = 0;
		u32 pisr_clear = 0;
		u32 sisr0 = 0;
		u32 sisr1 = 0;
		u32 sisr2 = 0;
		u32 sisr3 = 0;
		u32 sisr4 = 0;

		/* Read PISR first */
		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
		if (unlikely(pisr == AR5K_INT_NOCARD)) {
			/* All-ones read: the card is gone */
			*interrupt_mask = pisr;
			return -ENODEV;
		}

		/* Snapshot all secondary status registers */
		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

		/*
		 * Compute which PISR bits to acknowledge: everything
		 * that is NOT mirrored from the SISRs, plus the TX
		 * summary bits (those are cleared here while the
		 * per-queue detail is cleared via the SISR writes).
		 */
		pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
					(pisr & AR5K_INT_TX_ALL);

		/*
		 * Write the snapshots back to acknowledge exactly the
		 * bits we have seen (new bits set since the read are
		 * preserved), then clear the selected PISR bits.
		 */
		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);

		/* Flush the acknowledge writes */
		ath5k_hw_reg_read(ah, AR5K_PISR);

		/* Only report interrupts we actually asked for */
		*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;

		/*
		 * Accumulate per-queue TX status bits for the tasklet.
		 * Note: TXOK/TXDESC/TXERR/TXEOL all feed the same
		 * ah_txq_isr_txok_all accumulator.
		 */
		if (pisr & AR5K_ISR_TXOK)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXOK);

		if (pisr & AR5K_ISR_TXDESC)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXDESC);

		if (pisr & AR5K_ISR_TXERR)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXERR);

		if (pisr & AR5K_ISR_TXEOL)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXEOL);

		/* Per-queue TX underrun bits live in SISR2 */
		if (pisr & AR5K_ISR_TXURN)
			ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
						AR5K_SISR2_QCU_TXURN);

		/* TIM can arrive directly on PISR... */
		if (pisr & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* ...or via BCNMISC, with detail bits in SISR2 */
		if (pisr & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		/* Host interface unit error -> fatal */
		if (unlikely(pisr & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/* Beacon not ready */
		if (unlikely(pisr & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* Per-queue CBR overrun (detail in SISR3) */
		if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRORN);
		}

		/* Per-queue CBR underrun (detail in SISR3) */
		if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRURN);
		}

		/* Per-queue trigger (detail in SISR4) */
		if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
						AR5K_SISR4_QTRIG);
		}

		data = pisr;
	}

	/*
	 * An interrupt fired but nothing we recognize remained after
	 * masking: log it (rate-limited) for debugging.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}
744
745
746
747
748
749
750
751
752
753
/**
 * ath5k_hw_set_imr() - Set interrupt mask
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask (&enum ath5k_int flags)
 *
 * Translates driver-level AR5K_INT_* flags to hardware IMR/PIMR (and
 * SIMR2 on QCU hardware) bits and programs them, with global
 * interrupts disabled for the duration of the update.
 *
 * Returns the previous interrupt mask.
 */
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts before touching the mask registers
	 * to avoid races with the ISR; the read-back flushes the write.
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/* Bits shared between driver flags and hardware IMR */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve the per-queue TXURN bits already in SIMR2 */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		/* Fatal expands to HIU error + the SIMR2 bus errors */
		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* TIM is reported both directly and via SIMR2 */
		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		/* Beacon not ready */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		/* Note: per-queue interrupt masks are set elsewhere
		 * (when the queues themselves are configured) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		/* 5210: fatal expands to all four bus-error bits */
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		/* Set the single interrupt mask register */
		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM isn't wanted, zero its timer so it never fires */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store the new mask in our shadow copy */
	ah->ah_imr = new_mask;

	/* Re-enable card interrupts if requested */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855 void
856 ath5k_hw_dma_init(struct ath5k_hw *ah)
857 {
858
859
860
861
862
863
864
865
866
867
868
869
870
871 if (ah->ah_version != AR5K_AR5210) {
872 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
873 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
874 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
875 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
876 }
877
878
879 if (ah->ah_version != AR5K_AR5210)
880 ath5k_hw_set_imr(ah, ah->ah_imr);
881
882 }
883
884
885
886
887
888
889
890
891
892
893
894
895 int
896 ath5k_hw_dma_stop(struct ath5k_hw *ah)
897 {
898 int i, qmax, err;
899 err = 0;
900
901
902 ath5k_hw_set_imr(ah, 0);
903
904
905 err = ath5k_hw_stop_rx_dma(ah);
906 if (err)
907 return err;
908
909
910
911 if (ah->ah_version != AR5K_AR5210) {
912 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
913 qmax = AR5K_NUM_TX_QUEUES;
914 } else {
915
916 ath5k_hw_reg_read(ah, AR5K_ISR);
917 qmax = AR5K_NUM_TX_QUEUES_NOQCU;
918 }
919
920 for (i = 0; i < qmax; i++) {
921 err = ath5k_hw_stop_tx_dma(ah, i);
922
923 if (err && err != -EINVAL)
924 return err;
925 }
926
927 return 0;
928 }