This source file includes the following definitions:
- qtnf_deassert_intx
- qtnf_topaz_intx_asserted
- qtnf_topaz_reset_ep
- setup_rx_irqs
- enable_rx_irqs
- disable_rx_irqs
- qtnf_topaz_ipc_gen_ep_int
- qtnf_is_state
- qtnf_set_state
- qtnf_poll_state
- topaz_alloc_bd_table
- topaz_skb2rbd_attach
- topaz_alloc_rx_buffers
- qtnf_topaz_free_xfer_buffers
- qtnf_pcie_topaz_init_xfer
- qtnf_topaz_data_tx_reclaim
- qtnf_try_stop_xmit
- qtnf_try_wake_xmit
- qtnf_tx_queue_ready
- qtnf_pcie_data_tx
- qtnf_pcie_topaz_interrupt
- qtnf_rx_data_ready
- qtnf_topaz_rx_poll
- qtnf_pcie_data_tx_timeout
- qtnf_pcie_data_rx_start
- qtnf_pcie_data_rx_stop
- qtnf_dbg_irq_stats
- qtnf_dbg_pkt_stats
- qtnf_reset_dma_offset
- qtnf_pcie_endian_detect
- qtnf_pre_init_ep
- qtnf_post_init_ep
- qtnf_ep_fw_load
- qtnf_topaz_fw_upload
- qtnf_topaz_fw_work_handler
- qtnf_reclaim_tasklet_fn
- qtnf_topaz_dma_mask_get
- qtnf_pcie_topaz_probe
- qtnf_pcie_topaz_remove
- qtnf_pcie_topaz_suspend
- qtnf_pcie_topaz_resume
- qtnf_pcie_topaz_alloc
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>

#include "pcie_priv.h"
#include "topaz_pcie_regs.h"
#include "topaz_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

#define TOPAZ_TX_BD_SIZE_DEFAULT	128

struct qtnf_topaz_tx_bd {
	__le32 addr;
	__le32 info;
} __packed;

struct qtnf_topaz_rx_bd {
	__le32 addr;
	__le32 info;
} __packed;

struct qtnf_extra_bd_params {
	__le32 param1;
	__le32 param2;
	__le32 param3;
	__le32 param4;
} __packed;

#define QTNF_BD_PARAM_OFFSET(n)	offsetof(struct qtnf_extra_bd_params, param##n)

struct vmac_pkt_info {
	__le32 addr;
	__le32 info;
};

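/* Board Data Area: control block in card memory shared between the host
 * driver and the endpoint firmware.
 */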
struct qtnf_topaz_bda {
	__le16	bda_len;
	__le16	bda_version;
	__le32	bda_bootstate;
	__le32	bda_dma_mask;
	__le32	bda_dma_offset;
	__le32	bda_flags;
	__le32	bda_img;
	__le32	bda_img_size;
	__le32	bda_ep2h_irqstatus;
	__le32	bda_h2ep_irqstatus;
	__le32	bda_msi_addr;
	u8	reserved1[56];
	__le32	bda_flashsz;
	u8	bda_boardname[PCIE_BDA_NAMELEN];
	__le32	bda_pci_pre_status;
	__le32	bda_pci_endian;
	__le32	bda_pci_post_status;
	__le32	bda_h2ep_txd_budget;
	__le32	bda_ep2h_txd_budget;
	__le32	bda_rc_rx_bd_base;
	__le32	bda_rc_rx_bd_num;
	__le32	bda_rc_tx_bd_base;
	__le32	bda_rc_tx_bd_num;
	u8	bda_ep_link_state;
	u8	bda_rc_link_state;
	u8	bda_rc_msi_enabled;
	u8	reserved2;
	__le32	bda_ep_next_pkt;
	struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096);
	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096);
} __packed;

struct qtnf_pcie_topaz_state {
	struct qtnf_pcie_bus_priv base;
	struct qtnf_topaz_bda __iomem *bda;

	dma_addr_t dma_msi_dummy;
	u32 dma_msi_imwr;

	struct qtnf_topaz_tx_bd *tx_bd_vbase;
	struct qtnf_topaz_rx_bd *rx_bd_vbase;

	__le32 __iomem *ep_next_rx_pkt;
	__le32 __iomem *txqueue_wake;
	__le32 __iomem *ep_pmstate;

	unsigned long rx_pkt_count;
};

static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~TOPAZ_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}

static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg = readl(reg);

	return !!(cfg & TOPAZ_ASSERT_INTX);
}

static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
{
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(ts->base.pdev);
}

static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	ts->dma_msi_imwr = readl(reg);
}

static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	qtnf_non_posted_write(ts->dma_msi_imwr, reg);
}

static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
}

static void qtnf_topaz_ipc_gen_ep_int(void *arg)
{
	struct qtnf_pcie_topaz_state *ts = arg;

	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
	       TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return (s == state);
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	qtnf_non_posted_write(state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while (!qtnf_is_state(reg, state)) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}

static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
				struct qtnf_topaz_bda __iomem *bda)
{
	struct qtnf_extra_bd_params __iomem *extra_params;
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	dma_addr_t paddr;
	void *vaddr;
	int len;
	int i;

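	/* a single coherent allocation holds the TX ring, the RX ring and
	 * the extra shared parameters, laid out back to back
	 */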
	len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
	      priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
	      sizeof(struct qtnf_extra_bd_params);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, len);

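	/* TX descriptors */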
	ts->tx_bd_vbase = vaddr;
	qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);

	for (i = 0; i < priv->tx_bd_num; i++)
		ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

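	/* RX descriptors follow the TX ring within the same allocation */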
	vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);

	ts->rx_bd_vbase = vaddr;
	qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

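	/* extra shared parameters at the tail of the descriptor table */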
	vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
	paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);

	extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;

	ts->ep_next_rx_pkt = &extra_params->param1;
	qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
			      &bda->bda_ep_next_pkt);
	ts->txqueue_wake = &extra_params->param2;
	ts->ep_pmstate = &extra_params->param3;
	ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);

	return 0;
}

static int
topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
{
	struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
	if (!skb) {
		ts->base.rx_skb[index] = NULL;
		return -ENOMEM;
	}

	ts->base.rx_skb[index] = skb;

	paddr = pci_map_single(ts->base.pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(ts->base.pdev, paddr)) {
		pr_err("skb mapping error: %pad\n", &paddr);
		/* don't leave a dangling unmapped skb in the ring */
		dev_kfree_skb_any(skb);
		ts->base.rx_skb[index] = NULL;
		return -ENOMEM;
	}

	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);

	ts->base.rx_bd_w_index = index;

	return 0;
}

static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
{
	u16 i;
	int ret = 0;

	memset(ts->rx_bd_vbase, 0x0,
	       ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));

	for (i = 0; i < ts->base.rx_bd_num; i++) {
		ret = topaz_skb2rbd_attach(ts, i, 0);
		if (ret)
			break;
	}

	ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
		cpu_to_le32(QTN_BD_WRAP);

	return ret;
}

static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_rx_bd *rxbd;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

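	/* free RX buffers */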
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &ts->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
			rxbd->addr = 0;
			rxbd->info = 0;
		}
	}

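	/* free TX buffers */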
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &ts->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
			txbd->addr = 0;
			txbd->info = 0;
		}
	}
}

static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
				     unsigned int tx_bd_size)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	int ret;

	if (tx_bd_size == 0)
		tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;

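	/* TX BDs are mirrored by the fixed-size request array in the board
	 * data area, so the queue cannot exceed QTN_PCIE_RC_TX_QUEUE_LEN
	 */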
	if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
		pr_warn("TX BD queue cannot exceed %d\n",
			QTN_PCIE_RC_TX_QUEUE_LEN);
		tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
	}

	priv->tx_bd_num = tx_bd_size;
	qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
	qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);

	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	ret = qtnf_pcie_alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = topaz_alloc_bd_table(ts, bda);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = topaz_alloc_rx_buffers(ts);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}

static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	tx_done_index = readl(ts->ep_next_rx_pkt);
	i = priv->tx_bd_r_index;

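	/* kick the card again if it has not consumed all queued TX packets */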
	if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
		writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
		       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];

		if (likely(skb)) {
			txbd = &ts->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);

			if (skb->dev) {
				qtnf_update_tx_stats(skb->dev, skb);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);

	if (ndev) {
		netif_tx_stop_all_queues(ndev);
		ts->base.tx_stopped = 1;
	}

	writel(0x0, ts->txqueue_wake);

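	/* sync up the queue-stopped state before interrupting the card */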
	dma_wmb();

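	/* interrupt the card: TX is stopped on the host side */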
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

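	/* schedule TX reclaim to free up descriptors */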
	tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	int ready;

	ready = readl(ts->txqueue_wake);
	if (ready) {
		netif_wake_queue(ndev);
	} else {
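		/* re-send the TX-stopped interrupt until the card wakes the queue */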
		writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
		       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
	}
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_topaz_data_tx_reclaim(ts);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}

static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct qtnf_topaz_tx_bd *txbd;
	dma_addr_t skb_paddr;
	unsigned long flags;
	int ret = 0;
	int len;
	int i;

	if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
		qtnf_packet_send_hi_pri(skb);
		qtnf_update_tx_stats(skb->dev, skb);
		priv->tx_eapol++;
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ts)) {
		qtnf_try_stop_xmit(bus, skb->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ts->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));

	writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
	writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);

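	/* make sure the descriptor update is visible before the doorbell */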
	dma_wmb();

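	/* interrupt the card: a new TX packet is ready */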
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
	       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret) {
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_topaz_data_tx_reclaim(ts);

	return NETDEV_TX_OK;
}

static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
		return IRQ_NONE;

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ts);

	priv->pcie_irq_count++;

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (napi_schedule_prep(&bus->mux_napi)) {
		disable_rx_irqs(ts);
		__napi_schedule(&bus->mux_napi);
	}

	tasklet_hi_schedule(&priv->reclaim_tq);

	return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
{
	u16 index = ts->base.rx_bd_r_index;
	struct qtnf_topaz_rx_bd *rxbd;
	u32 descw;

	rxbd = &ts->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	if (descw & QTN_BD_EMPTY)
		return 0;

	return 1;
}

static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_topaz_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 poffset;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ts))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ts->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		poffset = QTN_GET_OFFSET(descw);
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (descw & QTN_BD_EMPTY) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_reserve(skb, poffset);
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

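		/* notify the card about consumed RX packets once per
		 * (RX_DONE_INTR_MSK + 1) packets
		 */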
		if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
			writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
			       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

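		/* refill the ring with freshly allocated RX buffers */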
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = topaz_skb2rbd_attach(ts, w_idx,
						   descw & QTN_BD_WRAP);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		enable_rx_irqs(ts);
	}

	return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	qtnf_try_wake_xmit(bus, ndev);
	tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	napi_enable(&bus->mux_napi);
	enable_rx_irqs(ts);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	disable_rx_irqs(ts);
	napi_disable(&bus->mux_napi);
}

static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
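	/* control path methods */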
	.control_tx	= qtnf_pcie_control_tx,

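	/* data path methods */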
	.data_tx		= qtnf_pcie_data_tx,
	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
	.data_rx_start		= qtnf_pcie_data_rx_start,
	.data_rx_stop		= qtnf_pcie_data_rx_stop,
};

static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);

	return 0;
}

static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	u32 tx_done_index = readl(ts->ep_next_rx_pkt);

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
	seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);

	seq_printf(s, "tx host queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));
	seq_printf(s, "tx reclaim queue len(%u)\n",
		   CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));
	seq_printf(s, "tx card queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}

static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 offset = readl(&bda->bda_dma_offset);

	if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
		return;

	writel(0x0, &bda->bda_dma_offset);
}

static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 timeout = 0;
	u32 endian;
	int ret = 0;

	writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);

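	/* flush the endian-check pattern before raising the valid status */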
	dma_wmb();

	writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);

	while (readl(&bda->bda_pci_post_status) !=
	       QTN_PCI_ENDIAN_VALID_STATUS) {
		usleep_range(1000, 1200);
		if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
			pr_err("card endianness detection timed out\n");
			ret = -ETIMEDOUT;
			goto endian_out;
		}
	}

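	/* don't read the result before the card reports a valid post status */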
	dma_rmb();

	endian = readl(&bda->bda_pci_endian);
	WARN(endian != QTN_PCI_LITTLE_ENDIAN,
	     "%s: unexpected card endianness", __func__);

endian_out:
	writel(0, &bda->bda_pci_pre_status);
	writel(0, &bda->bda_pci_post_status);
	writel(0, &bda->bda_pci_endian);

	return ret;
}

static int qtnf_pre_init_ep(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 flags;
	int ret;

	ret = qtnf_pcie_endian_detect(ts);
	if (ret < 0) {
		pr_err("failed to detect card endianness\n");
		return ret;
	}

	writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
	qtnf_reset_dma_offset(ts);

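	/* tell the card whether to boot firmware from flash or to wait for
	 * a host download
	 */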
	flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;

	if (ts->base.flashboot)
		flags |= QTN_BDA_FLASH_BOOT;
	else
		flags &= ~QTN_BDA_FLASH_BOOT;

	writel(flags, &bda->bda_flags);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready to boot...\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
{
	struct pci_dev *pdev = ts->base.pdev;

	setup_rx_irqs(ts);
	disable_rx_irqs(ts);

	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
			    QTN_FW_QLINK_TIMEOUT_MS))
		return -ETIMEDOUT;

	enable_irq(pdev->irq);
	return 0;
}

static int
qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct pci_dev *pdev = ts->base.pdev;
	u32 remaining = fw_size;
	u8 *curr = (u8 *)fw;
	u32 blksize;
	u32 nblocks;
	u32 offset;
	u32 count;
	u32 size;
	dma_addr_t paddr;
	void *data;
	int ret = 0;

	pr_debug("FW upload started: fw_addr = 0x%p, size=%u\n", fw, fw_size);

	blksize = ts->base.fw_blksize;

	if (blksize < PAGE_SIZE)
		blksize = PAGE_SIZE;

	while (blksize >= PAGE_SIZE) {
		pr_debug("allocating %u bytes to upload FW\n", blksize);
		data = dma_alloc_coherent(&pdev->dev, blksize,
					  &paddr, GFP_KERNEL);
		if (data)
			break;
		blksize /= 2;
	}

	if (!data) {
		pr_err("failed to allocate DMA buffer for FW upload\n");
		ret = -ENOMEM;
		goto fw_load_out;
	}

	nblocks = NBLOCKS(fw_size, blksize);
	offset = readl(&bda->bda_dma_offset);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready to download FW\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	for (count = 0; count < nblocks; count++) {
		size = (remaining > blksize) ? blksize : remaining;

		memcpy(data, curr, size);
		qtnf_non_posted_write(paddr + offset, &bda->bda_img);
		qtnf_non_posted_write(size, &bda->bda_img_size);

		pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
			 count, (void *)curr, &paddr, size);

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
		if (qtnf_poll_state(&ts->bda->bda_bootstate,
				    QTN_BDA_FW_BLOCK_DONE,
				    QTN_FW_DL_TIMEOUT_MS)) {
			pr_err("confirmation for block #%u timed out\n", count);
			ret = -ETIMEDOUT;
			goto fw_load_map;
		}

		remaining = (remaining < size) ? remaining : (remaining - size);
		curr += size;
	}

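	/* a zero-size block marks the end of the image */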
	qtnf_non_posted_write(0, &bda->bda_img);
	qtnf_non_posted_write(0, &bda->bda_img_size);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("confirmation for the last block timed out\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

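	/* signal the card that the whole firmware image has been transferred */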
	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("confirmation for FW upload completion timed out\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	pr_debug("FW upload completed: sent %u blocks in total\n", count);

fw_load_map:
	dma_free_coherent(&pdev->dev, blksize, data, paddr);

fw_load_out:
	return ret;
}

static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
				const char *fwname)
{
	const struct firmware *fw;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	if (qtnf_poll_state(&ts->bda->bda_bootstate,
			    QTN_BDA_FW_LOAD_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("%s: card is not ready\n", fwname);
		return -1;
	}

	pr_info("starting firmware upload: %s\n", fwname);

	ret = request_firmware(&fw, fwname, &pdev->dev);
	if (ret < 0) {
		pr_err("%s: request_firmware error %d\n", fwname, ret);
		return -1;
	}

	ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
	release_firmware(fw);

	if (ret)
		pr_err("%s: FW upload error\n", fwname);

	return ret;
}

static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);

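	/* the card may request a bootloader upload before the main firmware */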
	if (bootloader_needed) {
		ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
		if (ret)
			goto fw_load_exit;

		ret = qtnf_pre_init_ep(bus);
		if (ret)
			goto fw_load_exit;

		qtnf_set_state(&ts->bda->bda_bootstate,
			       QTN_BDA_FW_TARGET_BOOT);
	}

	if (ts->base.flashboot) {
		pr_info("booting firmware from flash\n");

		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_FLASH_BOOT,
				      QTN_FW_DL_TIMEOUT_MS);
		if (ret)
			goto fw_load_exit;
	} else {
		ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
		if (ret)
			goto fw_load_exit;

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_CONFIG,
				      QTN_FW_QLINK_TIMEOUT_MS);
		if (ret) {
			pr_err("FW bringup timed out\n");
			goto fw_load_exit;
		}

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_RUNNING,
				      QTN_FW_QLINK_TIMEOUT_MS);
		if (ret) {
			pr_err("card bringup timed out\n");
			goto fw_load_exit;
		}
	}

	ret = qtnf_post_init_ep(ts);
	if (ret) {
		pr_err("FW runtime failure\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	ret = qtnf_pcie_fw_boot_done(bus);
	if (ret)
		goto fw_load_exit;

	qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
	put_device(&pdev->dev);
}

static void qtnf_reclaim_tasklet_fn(unsigned long data)
{
	struct qtnf_pcie_topaz_state *ts = (void *)data;

	qtnf_topaz_data_tx_reclaim(ts);
}

static u64 qtnf_topaz_dma_mask_get(void)
{
	return DMA_BIT_MASK(32);
}

static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;
	struct qtnf_shm_ipc_int ipc_int;
	unsigned long irqflags;
	int ret;

	bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
	INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
	ts->bda = ts->base.epmem_bar;

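	/* a legacy INTx line may be shared with other devices; MSI is exclusive */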
	if (ts->base.msi_enabled)
		irqflags = IRQF_NOBALANCING;
	else
		irqflags = IRQF_NOBALANCING | IRQF_SHARED;

	ret = devm_request_irq(&pdev->dev, pdev->irq,
			       &qtnf_pcie_topaz_interrupt,
			       irqflags, "qtnf_topaz_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		return ret;
	}

	disable_irq(pdev->irq);

	ret = qtnf_pre_init_ep(bus);
	if (ret) {
		pr_err("failed to init card\n");
		return ret;
	}

	ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		return ret;
	}

	tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn,
		     (unsigned long)ts);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_topaz_rx_poll, 10);

	ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
	ipc_int.arg = ts;
	qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
			       &ts->bda->bda_shm_reg2, &ipc_int);

	return 0;
}

static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	qtnf_topaz_reset_ep(ts);
	qtnf_topaz_free_xfer_buffers(ts);
}

#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
	dma_wmb();
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	pci_save_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 1);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	writel((u32 __force)PCI_D0, ts->ep_pmstate);
	dma_wmb();
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_topaz_state *ts;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
	if (!bus)
		return NULL;

	ts = get_bus_priv(bus);
	ts->base.probe_cb = qtnf_pcie_topaz_probe;
	ts->base.remove_cb = qtnf_pcie_topaz_remove;
	ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
	ts->base.resume_cb = qtnf_pcie_topaz_resume;
	ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
#endif

	return bus;
}