Lines matching refs: ppd
Each entry shows the source line number, the matching code, and the enclosing function; a trailing "argument" or "local" tag marks lines where ppd is a function parameter or a local variable.
125 static void clear_sdma_activelist(struct qib_pportdata *ppd) in clear_sdma_activelist() argument
129 list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) { in clear_sdma_activelist()
136 unmap_desc(ppd, idx); in clear_sdma_activelist()
137 if (++idx == ppd->sdma_descq_cnt) in clear_sdma_activelist()
148 struct qib_pportdata *ppd = (struct qib_pportdata *) opaque; in sdma_sw_clean_up_task() local
151 spin_lock_irqsave(&ppd->sdma_lock, flags); in sdma_sw_clean_up_task()
163 qib_sdma_make_progress(ppd); in sdma_sw_clean_up_task()
165 clear_sdma_activelist(ppd); in sdma_sw_clean_up_task()
171 ppd->sdma_descq_removed = ppd->sdma_descq_added; in sdma_sw_clean_up_task()
178 ppd->sdma_descq_tail = 0; in sdma_sw_clean_up_task()
179 ppd->sdma_descq_head = 0; in sdma_sw_clean_up_task()
180 ppd->sdma_head_dma[0] = 0; in sdma_sw_clean_up_task()
181 ppd->sdma_generation = 0; in sdma_sw_clean_up_task()
183 __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned); in sdma_sw_clean_up_task()
185 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in sdma_sw_clean_up_task()
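The clean-up task above runs under sdma_lock and returns the ring to a known-empty state: everything ever added is counted as removed, and head, tail, and generation all go back to zero. A minimal self-contained sketch of that invariant (names here are illustrative, not the driver's):

#include <assert.h>

struct sdma_ring {
	unsigned head, tail, cnt;
	unsigned long long added, removed;
	unsigned generation;
};

static void ring_reset(struct sdma_ring *r)
{
	r->removed = r->added;	/* retire everything still outstanding */
	r->tail = 0;
	r->head = 0;
	r->generation = 0;
}

static void ring_assert_empty(const struct sdma_ring *r)
{
	assert(r->head == r->tail);		/* nothing queued */
	assert(r->added == r->removed);		/* nothing outstanding */
}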
193 static void sdma_hw_start_up(struct qib_pportdata *ppd) in sdma_hw_start_up() argument
195 struct qib_sdma_state *ss = &ppd->sdma_state; in sdma_hw_start_up()
199 ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno)); in sdma_hw_start_up()
201 ppd->dd->f_sdma_hw_start_up(ppd); in sdma_hw_start_up()
204 static void sdma_sw_tear_down(struct qib_pportdata *ppd) in sdma_sw_tear_down() argument
206 struct qib_sdma_state *ss = &ppd->sdma_state; in sdma_sw_tear_down()
212 static void sdma_start_sw_clean_up(struct qib_pportdata *ppd) in sdma_start_sw_clean_up() argument
214 tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task); in sdma_start_sw_clean_up()
217 static void sdma_set_state(struct qib_pportdata *ppd, in sdma_set_state() argument
220 struct qib_sdma_state *ss = &ppd->sdma_state; in sdma_set_state()
250 ppd->dd->f_sdma_sendctrl(ppd, ss->current_op); in sdma_set_state()
253 static void unmap_desc(struct qib_pportdata *ppd, unsigned head) in unmap_desc() argument
255 __le64 *descqp = &ppd->sdma_descq[head].qw[0]; in unmap_desc()
265 dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE); in unmap_desc()
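unmap_desc() re-reads the two little-endian qwords of a completed descriptor to recover the DMA address and length it was built with, then hands them to dma_unmap_single(). The exact bit layout is hardware-defined and not visible in these fragments, so the field extraction in this sketch is an assumption, kept only to show the shape of the helper:

#include <linux/dma-mapping.h>

/* Hedged sketch: the ADDR/LEN extraction below is illustrative, not
 * the real qib descriptor encoding. */
static void example_unmap_desc(struct device *dev, __le64 *descqp)
{
	u64 desc0 = le64_to_cpu(descqp[0]);
	u64 desc1 = le64_to_cpu(descqp[1]);
	dma_addr_t addr;
	size_t len;

	addr = (desc1 << 32) | (desc0 >> 32);	/* assumed address split */
	len = (desc0 & 0x7ff) << 2;		/* assumed dword count */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}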
268 static int alloc_sdma(struct qib_pportdata *ppd) in alloc_sdma() argument
270 ppd->sdma_descq_cnt = sdma_descq_cnt; in alloc_sdma()
271 if (!ppd->sdma_descq_cnt) in alloc_sdma()
272 ppd->sdma_descq_cnt = 256; in alloc_sdma()
275 ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev, in alloc_sdma()
276 ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys, in alloc_sdma()
279 if (!ppd->sdma_descq) { in alloc_sdma()
280 qib_dev_err(ppd->dd, in alloc_sdma()
286 ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, in alloc_sdma()
287 PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); in alloc_sdma()
288 if (!ppd->sdma_head_dma) { in alloc_sdma()
289 qib_dev_err(ppd->dd, in alloc_sdma()
293 ppd->sdma_head_dma[0] = 0; in alloc_sdma()
297 dma_free_coherent(&ppd->dd->pcidev->dev, in alloc_sdma()
298 ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq, in alloc_sdma()
299 ppd->sdma_descq_phys); in alloc_sdma()
300 ppd->sdma_descq = NULL; in alloc_sdma()
301 ppd->sdma_descq_phys = 0; in alloc_sdma()
303 ppd->sdma_descq_cnt = 0; in alloc_sdma()
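alloc_sdma() makes two coherent DMA allocations: the descriptor queue itself (sdma_descq_cnt entries of two qwords each, defaulting to 256) and a one-page buffer the hardware writes its head index into, unwinding the first allocation if the second fails. A hedged sketch of that alloc-and-unwind shape, using the standard dma_alloc_coherent()/dma_free_coherent() API against a made-up struct:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct sdma_bufs {
	__le64 (*descq)[2];		/* cnt entries, two qwords each */
	dma_addr_t descq_phys;
	volatile __le64 *head_dma;	/* hardware writes its head here */
	dma_addr_t head_phys;
	unsigned cnt;
};

static int example_alloc_sdma(struct device *dev, struct sdma_bufs *s,
			      unsigned cnt)
{
	s->cnt = cnt ? cnt : 256;	/* same default as the fragments */

	s->descq = dma_alloc_coherent(dev, s->cnt * sizeof(u64[2]),
				      &s->descq_phys, GFP_KERNEL);
	if (!s->descq)
		return -ENOMEM;

	s->head_dma = dma_alloc_coherent(dev, PAGE_SIZE, &s->head_phys,
					 GFP_KERNEL);
	if (!s->head_dma)
		goto cleanup_descq;
	s->head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(dev, s->cnt * sizeof(u64[2]), (void *)s->descq,
			  s->descq_phys);
	s->descq = NULL;
	s->descq_phys = 0;
	s->cnt = 0;
	return -ENOMEM;
}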
307 static void free_sdma(struct qib_pportdata *ppd) in free_sdma() argument
309 struct qib_devdata *dd = ppd->dd; in free_sdma()
311 if (ppd->sdma_head_dma) { in free_sdma()
313 (void *)ppd->sdma_head_dma, in free_sdma()
314 ppd->sdma_head_phys); in free_sdma()
315 ppd->sdma_head_dma = NULL; in free_sdma()
316 ppd->sdma_head_phys = 0; in free_sdma()
319 if (ppd->sdma_descq) { in free_sdma()
321 ppd->sdma_descq_cnt * sizeof(u64[2]), in free_sdma()
322 ppd->sdma_descq, ppd->sdma_descq_phys); in free_sdma()
323 ppd->sdma_descq = NULL; in free_sdma()
324 ppd->sdma_descq_phys = 0; in free_sdma()
328 static inline void make_sdma_desc(struct qib_pportdata *ppd, in make_sdma_desc() argument
339 sdmadesc[0] |= (ppd->sdma_generation & 3ULL) << in make_sdma_desc()
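make_sdma_desc() stamps every descriptor with the low two bits of ppd->sdma_generation, and the send path increments the generation each time the tail wraps (see the ++ppd->sdma_generation lines further down), so the engine can tell a freshly written entry from a stale one left over from the previous lap. An illustrative sketch of the idea, with a made-up bit position:

#include <stdint.h>
#include <stdbool.h>

#define GEN_SHIFT 30	/* assumed stamp position, not the real one */

static uint64_t stamp_desc(uint64_t desc0, unsigned generation)
{
	return desc0 | ((uint64_t)(generation & 3) << GEN_SHIFT);
}

static bool desc_is_current(uint64_t desc0, unsigned generation)
{
	return ((desc0 >> GEN_SHIFT) & 3) == (generation & 3);
}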
348 int qib_sdma_make_progress(struct qib_pportdata *ppd) in qib_sdma_make_progress() argument
352 struct qib_devdata *dd = ppd->dd; in qib_sdma_make_progress()
357 hwhead = dd->f_sdma_gethead(ppd); in qib_sdma_make_progress()
365 if (!list_empty(&ppd->sdma_activelist)) { in qib_sdma_make_progress()
366 lp = ppd->sdma_activelist.next; in qib_sdma_make_progress()
371 while (ppd->sdma_descq_head != hwhead) { in qib_sdma_make_progress()
374 (idx == ppd->sdma_descq_head)) { in qib_sdma_make_progress()
375 unmap_desc(ppd, ppd->sdma_descq_head); in qib_sdma_make_progress()
376 if (++idx == ppd->sdma_descq_cnt) in qib_sdma_make_progress()
381 ppd->sdma_descq_removed++; in qib_sdma_make_progress()
384 if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt) in qib_sdma_make_progress()
385 ppd->sdma_descq_head = 0; in qib_sdma_make_progress()
388 if (txp && txp->next_descq_idx == ppd->sdma_descq_head) { in qib_sdma_make_progress()
394 if (list_empty(&ppd->sdma_activelist)) in qib_sdma_make_progress()
397 lp = ppd->sdma_activelist.next; in qib_sdma_make_progress()
406 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); in qib_sdma_make_progress()
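qib_sdma_make_progress() advances the software head toward the head the hardware reports, unmapping single-buffer descriptors on the way and retiring each queued request once its next_descq_idx has been passed. A self-contained sketch of the walk and of a free count derived from the added/removed counters; keeping one slot unused is a common ring convention assumed here, not quoted from the driver:

struct ring {
	unsigned head, cnt;
	unsigned long long added, removed;
};

static unsigned ring_make_progress(struct ring *r, unsigned hwhead)
{
	unsigned retired = 0;

	while (r->head != hwhead) {
		r->removed++;		/* this entry is done */
		retired++;
		if (++r->head == r->cnt)
			r->head = 0;	/* wrap */
	}
	return retired;
}

static int ring_freecnt(const struct ring *r)
{
	/* one slot stays unused so a full ring != an empty ring */
	return r->cnt - (int)(r->added - r->removed) - 1;
}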
413 void qib_sdma_intr(struct qib_pportdata *ppd) in qib_sdma_intr() argument
417 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_sdma_intr()
419 __qib_sdma_intr(ppd); in qib_sdma_intr()
421 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_sdma_intr()
424 void __qib_sdma_intr(struct qib_pportdata *ppd) in __qib_sdma_intr() argument
426 if (__qib_sdma_running(ppd)) { in __qib_sdma_intr()
427 qib_sdma_make_progress(ppd); in __qib_sdma_intr()
428 if (!list_empty(&ppd->sdma_userpending)) in __qib_sdma_intr()
429 qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending); in __qib_sdma_intr()
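The pair above follows the usual kernel naming convention: __qib_sdma_intr() assumes the caller already holds sdma_lock, while qib_sdma_intr() is the wrapper that takes and releases it. A generic sketch of the pattern (not driver code):

#include <linux/spinlock.h>

struct engine {
	spinlock_t lock;
	int pending;
};

static void __engine_service(struct engine *e)
{
	/* caller must hold e->lock */
	e->pending = 0;
}

static void engine_service(struct engine *e)
{
	unsigned long flags;

	spin_lock_irqsave(&e->lock, flags);
	__engine_service(e);
	spin_unlock_irqrestore(&e->lock, flags);
}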
433 int qib_setup_sdma(struct qib_pportdata *ppd) in qib_setup_sdma() argument
435 struct qib_devdata *dd = ppd->dd; in qib_setup_sdma()
439 ret = alloc_sdma(ppd); in qib_setup_sdma()
444 ppd->dd->f_sdma_init_early(ppd); in qib_setup_sdma()
445 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_setup_sdma()
446 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in qib_setup_sdma()
447 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_setup_sdma()
450 kref_init(&ppd->sdma_state.kref); in qib_setup_sdma()
451 init_completion(&ppd->sdma_state.comp); in qib_setup_sdma()
453 ppd->sdma_generation = 0; in qib_setup_sdma()
454 ppd->sdma_descq_head = 0; in qib_setup_sdma()
455 ppd->sdma_descq_removed = 0; in qib_setup_sdma()
456 ppd->sdma_descq_added = 0; in qib_setup_sdma()
458 ppd->sdma_intrequest = 0; in qib_setup_sdma()
459 INIT_LIST_HEAD(&ppd->sdma_userpending); in qib_setup_sdma()
461 INIT_LIST_HEAD(&ppd->sdma_activelist); in qib_setup_sdma()
463 tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task, in qib_setup_sdma()
464 (unsigned long)ppd); in qib_setup_sdma()
466 ret = dd->f_init_sdma_regs(ppd); in qib_setup_sdma()
470 qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start); in qib_setup_sdma()
475 qib_teardown_sdma(ppd); in qib_setup_sdma()
480 void qib_teardown_sdma(struct qib_pportdata *ppd) in qib_teardown_sdma() argument
482 qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down); in qib_teardown_sdma()
489 sdma_finalput(&ppd->sdma_state); in qib_teardown_sdma()
491 free_sdma(ppd); in qib_teardown_sdma()
494 int qib_sdma_running(struct qib_pportdata *ppd) in qib_sdma_running() argument
499 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_sdma_running()
500 ret = __qib_sdma_running(ppd); in qib_sdma_running()
501 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_sdma_running()
513 static void complete_sdma_err_req(struct qib_pportdata *ppd, in complete_sdma_err_req() argument
520 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); in complete_sdma_err_req()
521 clear_sdma_activelist(ppd); in complete_sdma_err_req()
533 int qib_sdma_verbs_send(struct qib_pportdata *ppd, in qib_sdma_verbs_send() argument
547 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_sdma_verbs_send()
550 if (unlikely(!__qib_sdma_running(ppd))) { in qib_sdma_verbs_send()
551 complete_sdma_err_req(ppd, tx); in qib_sdma_verbs_send()
555 if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) { in qib_sdma_verbs_send()
556 if (qib_sdma_make_progress(ppd)) in qib_sdma_verbs_send()
558 if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT) in qib_sdma_verbs_send()
559 ppd->dd->f_sdma_set_desc_cnt(ppd, in qib_sdma_verbs_send()
560 ppd->sdma_descq_cnt / 2); in qib_sdma_verbs_send()
565 make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0); in qib_sdma_verbs_send()
572 tail = ppd->sdma_descq_tail; in qib_sdma_verbs_send()
573 descqp = &ppd->sdma_descq[tail].qw[0]; in qib_sdma_verbs_send()
578 if (++tail == ppd->sdma_descq_cnt) { in qib_sdma_verbs_send()
580 descqp = &ppd->sdma_descq[0].qw[0]; in qib_sdma_verbs_send()
581 ++ppd->sdma_generation; in qib_sdma_verbs_send()
598 addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr, in qib_sdma_verbs_send()
600 if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) in qib_sdma_verbs_send()
603 make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset); in qib_sdma_verbs_send()
612 if (++tail == ppd->sdma_descq_cnt) { in qib_sdma_verbs_send()
614 descqp = &ppd->sdma_descq[0].qw[0]; in qib_sdma_verbs_send()
615 ++ppd->sdma_generation; in qib_sdma_verbs_send()
640 descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0]; in qib_sdma_verbs_send()
650 ppd->dd->f_sdma_update_tail(ppd, tail); in qib_sdma_verbs_send()
651 ppd->sdma_descq_added += tx->txreq.sg_count; in qib_sdma_verbs_send()
652 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); in qib_sdma_verbs_send()
658 tail = ppd->sdma_descq_cnt - 1; in qib_sdma_verbs_send()
661 if (tail == ppd->sdma_descq_tail) in qib_sdma_verbs_send()
663 unmap_desc(ppd, tail); in qib_sdma_verbs_send()
694 dev = &ppd->dd->verbs_dev; in qib_sdma_verbs_send()
699 ibp = &ppd->ibport_data; in qib_sdma_verbs_send()
713 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_sdma_verbs_send()
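If dma_map_single() fails partway through building a request, the send path steps the tail backwards to where posting began, unmapping each descriptor it had already written (the unmap_desc(ppd, tail) fragments just above). A self-contained sketch of the backwards walk, with unmap_one() standing in for the descriptor-decoding unmap helper:

static void unwind_descs(unsigned tail, unsigned start, unsigned cnt,
			 void (*unmap_one)(unsigned idx))
{
	for (;;) {
		tail = tail ? tail - 1 : cnt - 1;	/* step back, wrapping */
		if (tail == start)
			break;		/* back where posting began */
		unmap_one(tail);
	}
}

Stopping before the starting slot matches the fragments above; the request's first descriptor appears to be torn down with the request itself rather than here.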
720 void dump_sdma_state(struct qib_pportdata *ppd) in dump_sdma_state() argument
730 head = ppd->sdma_descq_head; in dump_sdma_state()
731 tail = ppd->sdma_descq_tail; in dump_sdma_state()
732 cnt = qib_sdma_descq_freecnt(ppd); in dump_sdma_state()
733 descq = ppd->sdma_descq; in dump_sdma_state()
735 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_state()
737 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_state()
739 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_state()
758 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_state()
761 if (++head == ppd->sdma_descq_cnt) in dump_sdma_state()
766 list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist, in dump_sdma_state()
768 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_state()
773 void qib_sdma_process_event(struct qib_pportdata *ppd, in qib_sdma_process_event() argument
778 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_sdma_process_event()
780 __qib_sdma_process_event(ppd, event); in qib_sdma_process_event()
782 if (ppd->sdma_state.current_state == qib_sdma_state_s99_running) in qib_sdma_process_event()
783 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); in qib_sdma_process_event()
785 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_sdma_process_event()
788 void __qib_sdma_process_event(struct qib_pportdata *ppd, in __qib_sdma_process_event() argument
791 struct qib_sdma_state *ss = &ppd->sdma_state; in __qib_sdma_process_event()
809 sdma_get(&ppd->sdma_state); in __qib_sdma_process_event()
810 sdma_set_state(ppd, in __qib_sdma_process_event()
816 sdma_sw_tear_down(ppd); in __qib_sdma_process_event()
836 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in __qib_sdma_process_event()
837 sdma_sw_tear_down(ppd); in __qib_sdma_process_event()
842 sdma_set_state(ppd, ss->go_s99_running ? in __qib_sdma_process_event()
870 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in __qib_sdma_process_event()
871 sdma_sw_tear_down(ppd); in __qib_sdma_process_event()
878 sdma_set_state(ppd, qib_sdma_state_s99_running); in __qib_sdma_process_event()
901 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in __qib_sdma_process_event()
911 sdma_set_state(ppd, in __qib_sdma_process_event()
913 sdma_hw_start_up(ppd); in __qib_sdma_process_event()
934 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in __qib_sdma_process_event()
935 sdma_start_sw_clean_up(ppd); in __qib_sdma_process_event()
947 sdma_set_state(ppd, in __qib_sdma_process_event()
949 sdma_start_sw_clean_up(ppd); in __qib_sdma_process_event()
968 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in __qib_sdma_process_event()
969 sdma_start_sw_clean_up(ppd); in __qib_sdma_process_event()
983 sdma_set_state(ppd, in __qib_sdma_process_event()
985 ppd->dd->f_sdma_hw_clean_up(ppd); in __qib_sdma_process_event()
1002 sdma_set_state(ppd, qib_sdma_state_s00_hw_down); in __qib_sdma_process_event()
1003 sdma_start_sw_clean_up(ppd); in __qib_sdma_process_event()
1016 sdma_set_state(ppd, in __qib_sdma_process_event()
1018 sdma_start_sw_clean_up(ppd); in __qib_sdma_process_event()
1021 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); in __qib_sdma_process_event()
1025 sdma_set_state(ppd, in __qib_sdma_process_event()
1027 sdma_start_sw_clean_up(ppd); in __qib_sdma_process_event()
1030 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); in __qib_sdma_process_event()
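The event processor that closes the listing is one big two-level switch: the outer switch on the current state, the inner on the incoming event, with each arm calling sdma_set_state() and kicking off the matching action (hardware clean-up, software clean-up, or tear-down). A toy sketch of that shape, with a deliberately tiny subset of states and events:

enum state { S_HW_DOWN, S_RUNNING };
enum event { E_GO_HW_START, E_GO_HW_DOWN };

static enum state step(enum state cur, enum event ev)
{
	switch (cur) {
	case S_HW_DOWN:
		switch (ev) {
		case E_GO_HW_START:
			return S_RUNNING;	/* would start hardware here */
		default:
			return cur;		/* event ignored in this state */
		}
	case S_RUNNING:
		switch (ev) {
		case E_GO_HW_DOWN:
			return S_HW_DOWN;	/* would schedule clean-up here */
		default:
			return cur;
		}
	}
	return cur;
}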