Searched refs:rspq (Results 1 - 26 of 26) sorted by relevance

/linux-4.4.14/drivers/net/ethernet/brocade/bna/
bfa_msgq.c 319 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
330 bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
331 bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
333 bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
334 bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
338 rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq) rspq_sm_stopped_entry() argument
340 rspq->producer_index = 0; rspq_sm_stopped_entry()
341 rspq->consumer_index = 0; rspq_sm_stopped_entry()
342 rspq->flags = 0; rspq_sm_stopped_entry()
346 rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event) rspq_sm_stopped() argument
350 bfa_fsm_set_state(rspq, rspq_sm_init_wait); rspq_sm_stopped()
364 rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq) rspq_sm_init_wait_entry() argument
366 bfa_wc_down(&rspq->msgq->init_wc); rspq_sm_init_wait_entry()
370 rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) rspq_sm_init_wait() argument
375 bfa_fsm_set_state(rspq, rspq_sm_stopped); rspq_sm_init_wait()
379 bfa_fsm_set_state(rspq, rspq_sm_ready); rspq_sm_init_wait()
388 rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq) rspq_sm_ready_entry() argument
393 rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event) rspq_sm_ready() argument
398 bfa_fsm_set_state(rspq, rspq_sm_stopped); rspq_sm_ready()
402 bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); rspq_sm_ready()
411 rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq) rspq_sm_dbell_wait_entry() argument
413 if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc)) rspq_sm_dbell_wait_entry()
414 bfa_msgq_rspq_dbell(rspq); rspq_sm_dbell_wait_entry()
418 rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) rspq_sm_dbell_wait() argument
423 bfa_fsm_set_state(rspq, rspq_sm_stopped); rspq_sm_dbell_wait()
427 rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE; rspq_sm_dbell_wait()
431 if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) { rspq_sm_dbell_wait()
432 rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE; rspq_sm_dbell_wait()
433 bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); rspq_sm_dbell_wait()
435 bfa_fsm_set_state(rspq, rspq_sm_ready); rspq_sm_dbell_wait()
446 struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg; bfa_msgq_rspq_dbell_ready() local
447 bfa_fsm_send_event(rspq, RSPQ_E_DB_READY); bfa_msgq_rspq_dbell_ready()
451 bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq) bfa_msgq_rspq_dbell() argument
454 (struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]); bfa_msgq_rspq_dbell()
459 dbell->idx.rspq_ci = htons(rspq->consumer_index); bfa_msgq_rspq_dbell()
461 if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb, bfa_msgq_rspq_dbell()
462 bfa_msgq_rspq_dbell_ready, rspq)) { bfa_msgq_rspq_dbell()
463 bfa_msgq_rspq_dbell_ready(rspq); bfa_msgq_rspq_dbell()
468 bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb) bfa_msgq_rspq_pi_update() argument
476 rspq->producer_index = ntohs(dbell->idx.rspq_pi); bfa_msgq_rspq_pi_update()
478 while (rspq->consumer_index != rspq->producer_index) { bfa_msgq_rspq_pi_update()
479 rspq_qe = (u8 *)rspq->addr.kva; bfa_msgq_rspq_pi_update()
480 rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE); bfa_msgq_rspq_pi_update()
486 if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL)) bfa_msgq_rspq_pi_update()
489 (rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr); bfa_msgq_rspq_pi_update()
491 BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries, bfa_msgq_rspq_pi_update()
492 rspq->depth); bfa_msgq_rspq_pi_update()
495 bfa_fsm_send_event(rspq, RSPQ_E_RESP); bfa_msgq_rspq_pi_update()
499 bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq) bfa_msgq_rspq_attach() argument
501 rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY; bfa_msgq_rspq_attach()
502 rspq->msgq = msgq; bfa_msgq_rspq_attach()
503 bfa_fsm_set_state(rspq, rspq_sm_stopped); bfa_msgq_rspq_attach()
511 bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP); bfa_msgq_init_rsp()
527 bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa); bfa_msgq_init()
528 msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); bfa_msgq_init()
544 bfa_msgq_rspq_pi_update(&msgq->rspq, msg); bfa_msgq_isr()
571 bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START); bfa_msgq_notify()
577 bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP); bfa_msgq_notify()
582 bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL); bfa_msgq_notify()
606 msgq->rspq.addr.kva = kva; bfa_msgq_memclaim()
607 msgq->rspq.addr.pa = pa; bfa_msgq_memclaim()
616 bfa_msgq_rspq_attach(&msgq->rspq, msgq); bfa_msgq_attach()
627 msgq->rspq.rsphdlr[mc].cbfn = cbfn; bfa_msgq_regisr()
628 msgq->rspq.rsphdlr[mc].cbarg = cbarg; bfa_msgq_regisr()
647 struct bfa_msgq_rspq *rspq = &msgq->rspq; bfa_msgq_rsp_copy() local
653 ci = rspq->consumer_index; bfa_msgq_rsp_copy()
654 src = (u8 *)rspq->addr.kva; bfa_msgq_rsp_copy()
664 BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth); bfa_msgq_rsp_copy()
665 src = (u8 *)rspq->addr.kva; bfa_msgq_rsp_copy()
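The bna hits above outline an FSM-driven firmware response queue: bfa_msgq_rspq_pi_update() picks up the producer index from a doorbell mailbox, then drains entries from consumer_index up to producer_index, dispatching each one by message class through the rsphdlr table. Below is a minimal standalone sketch of that drain loop, with simplified types and a hypothetical msg_hdr; note the real driver advances CI by a per-message entry count, while this model steps one entry at a time.

#include <stdio.h>
#include <stdint.h>

#define RSPQ_DEPTH 8   /* stands in for BFA_MSGQ_RSPQ_NUM_ENTRY */
#define MC_MAX     4   /* stands in for BFI_MC_MAX */

struct msg_hdr { uint8_t msg_class; };          /* hypothetical header */
typedef void (*rsp_cbfn)(void *arg, struct msg_hdr *hdr);

struct rspq {
    unsigned producer_index, consumer_index, depth;
    struct msg_hdr ring[RSPQ_DEPTH];
    struct { rsp_cbfn cbfn; void *cbarg; } rsphdlr[MC_MAX];
};

/* Drain everything between CI and PI, as bfa_msgq_rspq_pi_update() does. */
static void rspq_drain(struct rspq *q)
{
    while (q->consumer_index != q->producer_index) {
        struct msg_hdr *hdr = &q->ring[q->consumer_index];
        uint8_t mc = hdr->msg_class;

        if (mc < MC_MAX && q->rsphdlr[mc].cbfn)
            q->rsphdlr[mc].cbfn(q->rsphdlr[mc].cbarg, hdr);

        /* BFA_MSGQ_INDX_ADD equivalent: advance CI modulo the ring depth
         * (simplified to one entry per message). */
        q->consumer_index = (q->consumer_index + 1) % q->depth;
    }
    /* the real driver now sends RSPQ_E_RESP so the FSM re-arms the doorbell */
}

static void print_msg(void *arg, struct msg_hdr *hdr)
{
    (void)arg;
    printf("handled class %u\n", (unsigned)hdr->msg_class);
}

int main(void)
{
    struct rspq q = { .depth = RSPQ_DEPTH };
    q.rsphdlr[1].cbfn = print_msg;
    q.ring[0].msg_class = 1;
    q.ring[1].msg_class = 1;
    q.producer_index = 2;          /* pretend the doorbell reported PI = 2 */
    rspq_drain(&q);
    return 0;
}

Judging from the states listed above, the surrounding FSM (stopped/init_wait/ready/dbell_wait) exists to serialize the CI doorbell mailbox command against IOC state changes.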
bfa_msgq.h 113 struct bfa_msgq_rspq rspq; member in struct:bfa_msgq
bfi.h 429 struct bfi_msgq rspq; member in struct:bfi_msgq_cfg_req
/linux-4.4.14/drivers/scsi/bfa/
bfa_hw_ct.c 74 bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) bfa_hwct_rspq_ack() argument
78 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); bfa_hwct_rspq_ack()
79 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); bfa_hwct_rspq_ack()
81 bfa_rspq_ci(bfa, rspq) = ci; bfa_hwct_rspq_ack()
82 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); bfa_hwct_rspq_ack()
92 bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) bfa_hwct2_rspq_ack() argument
94 bfa_rspq_ci(bfa, rspq) = ci; bfa_hwct2_rspq_ack()
95 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); bfa_hwct2_rspq_ack()
bfa_hw_cb.c 53 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) bfa_hwcb_rspq_ack_msix() argument
55 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), bfa_hwcb_rspq_ack_msix()
58 if (bfa_rspq_ci(bfa, rspq) == ci) bfa_hwcb_rspq_ack_msix()
61 bfa_rspq_ci(bfa, rspq) = ci; bfa_hwcb_rspq_ack_msix()
62 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); bfa_hwcb_rspq_ack_msix()
67 bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) bfa_hwcb_rspq_ack() argument
69 if (bfa_rspq_ci(bfa, rspq) == ci) bfa_hwcb_rspq_ack()
72 bfa_rspq_ci(bfa, rspq) = ci; bfa_hwcb_rspq_ack()
73 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); bfa_hwcb_rspq_ack()
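Both scsi/bfa ack variants above update a shadow consumer index and then write it to the RME queue CI doorbell; the -cb flavor additionally skips the write when CI has not moved. A standalone sketch of that skip-if-unchanged doorbell pattern, with a plain variable standing in for the writel() MMIO target:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                 /* stands in for the MMIO doorbell */

struct hw { uint32_t shadow_ci; volatile uint32_t *ci_reg; };

/* bfa_hwcb_rspq_ack pattern: only touch the doorbell when CI moved. */
static void rspq_ack(struct hw *hw, uint32_t ci)
{
    if (hw->shadow_ci == ci)
        return;                           /* nothing consumed, skip MMIO */
    hw->shadow_ci = ci;
    *hw->ci_reg = ci;                     /* writel(ci, rme_q_ci[rspq]) */
}

int main(void)
{
    struct hw hw = { .ci_reg = &fake_reg };
    rspq_ack(&hw, 5);
    rspq_ack(&hw, 5);                     /* second call is a no-op */
    printf("doorbell = %u\n", (unsigned)fake_reg);
    return 0;
}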
bfa.h 187 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
321 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
333 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
334 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
335 void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
bfi_ms.h 141 u32 rspq_ba; /* rspq base addr */
143 u32 rspq_spi; /* rspq shadow pi */
bfi.h 600 struct bfi_msgq_s rspq; member in struct:bfi_msgq_cfg_req_s
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c 1571 struct adapter *adapter = rxq->rspq.adapter; do_gro()
1576 skb = napi_get_frags(&rxq->rspq.napi); do_gro()
1588 skb_record_rx_queue(skb, rxq->rspq.idx); do_gro()
1595 ret = napi_gro_frags(&rxq->rspq.napi); do_gro()
1607 * @rspq: the response queue that received the packet
1613 int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, t4vf_ethrx_handler() argument
1619 (rspq->netdev->features & NETIF_F_RXCSUM); t4vf_ethrx_handler()
1620 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); t4vf_ethrx_handler()
1621 struct adapter *adapter = rspq->adapter; t4vf_ethrx_handler()
1629 (rspq->netdev->features & NETIF_F_GRO) && csum_ok && t4vf_ethrx_handler()
1645 skb->protocol = eth_type_trans(skb, rspq->netdev); t4vf_ethrx_handler()
1646 skb_record_rx_queue(skb, rspq->idx); t4vf_ethrx_handler()
1675 * @rspq: the response queue
1681 const struct sge_rspq *rspq) is_new_response()
1683 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen; is_new_response()
1725 * @rspq: the queue
1729 static inline void rspq_next(struct sge_rspq *rspq) rspq_next() argument
1731 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; rspq_next()
1732 if (unlikely(++rspq->cidx == rspq->size)) { rspq_next()
1733 rspq->cidx = 0; rspq_next()
1734 rspq->gen ^= 1; rspq_next()
1735 rspq->cur_desc = rspq->desc; rspq_next()
1741 * @rspq: the ingress response queue to process
1752 static int process_responses(struct sge_rspq *rspq, int budget) process_responses() argument
1754 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); process_responses()
1755 struct adapter *adapter = rspq->adapter; process_responses()
1763 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); process_responses()
1764 if (!is_new_response(rc, rspq)) process_responses()
1790 if (likely(rspq->offset > 0)) { process_responses()
1791 free_rx_bufs(rspq->adapter, &rxq->fl, process_responses()
1793 rspq->offset = 0; process_responses()
1808 fp->offset = rspq->offset; process_responses()
1813 unmap_rx_buf(rspq->adapter, &rxq->fl); process_responses()
1822 dma_sync_single_for_cpu(rspq->adapter->pdev_dev, process_responses()
1833 ret = rspq->handler(rspq, rspq->cur_desc, &gl); process_responses()
1835 rspq->offset += ALIGN(fp->size, s->fl_align); process_responses()
1839 ret = rspq->handler(rspq, rspq->cur_desc, NULL); process_responses()
1852 rspq->next_intr_params = process_responses()
1857 rspq_next(rspq); process_responses()
1866 if (rspq->offset >= 0 && process_responses()
1868 __refill_fl(rspq->adapter, &rxq->fl); process_responses()
1886 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); napi_rx_handler() local
1887 int work_done = process_responses(rspq, budget); napi_rx_handler()
1892 intr_params = rspq->next_intr_params; napi_rx_handler()
1893 rspq->next_intr_params = rspq->intr_params; napi_rx_handler()
1898 rspq->unhandled_irqs++; napi_rx_handler()
1904 if (unlikely(!rspq->bar2_addr)) { napi_rx_handler()
1905 t4_write_reg(rspq->adapter, napi_rx_handler()
1907 val | INGRESSQID_V((u32)rspq->cntxt_id)); napi_rx_handler()
1909 writel(val | INGRESSQID_V(rspq->bar2_qid), napi_rx_handler()
1910 rspq->bar2_addr + SGE_UDB_GTS); napi_rx_handler()
1922 struct sge_rspq *rspq = cookie; t4vf_sge_intr_msix() local
1924 napi_schedule(&rspq->napi); t4vf_sge_intr_msix()
1943 struct sge_rspq *rspq; process_intrq() local
1981 rspq = s->ingr_map[iq_idx]; process_intrq()
1982 if (unlikely(rspq == NULL)) { process_intrq()
1987 if (unlikely(rspq->abs_id != qid)) { process_intrq()
1990 qid, rspq->abs_id); process_intrq()
1999 napi_schedule(&rspq->napi); process_intrq()
2094 if (napi_reschedule(&rxq->rspq.napi)) sge_rx_timer_cb()
2191 * @rspq: pointer to the new rxq's Response Queue to be filled in t4vf_sge_alloc_rxq()
2192 * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2193 * @dev: the network device associated with the new rspq
2196 * @hnd: the interrupt handler to invoke for the rspq
2198 int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, t4vf_sge_alloc_rxq() argument
2215 if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) { t4vf_sge_alloc_rxq()
2227 rspq->size = roundup(rspq->size, 16); t4vf_sge_alloc_rxq()
2228 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, t4vf_sge_alloc_rxq()
2229 0, &rspq->phys_addr, NULL, 0); t4vf_sge_alloc_rxq()
2230 if (!rspq->desc) t4vf_sge_alloc_rxq()
2259 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | t4vf_sge_alloc_rxq()
2260 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); t4vf_sge_alloc_rxq()
2261 cmd.iqsize = cpu_to_be16(rspq->size); t4vf_sge_alloc_rxq()
2262 cmd.iqaddr = cpu_to_be64(rspq->phys_addr); t4vf_sge_alloc_rxq()
2321 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); t4vf_sge_alloc_rxq()
2322 rspq->cur_desc = rspq->desc; t4vf_sge_alloc_rxq()
2323 rspq->cidx = 0; t4vf_sge_alloc_rxq()
2324 rspq->gen = 1; t4vf_sge_alloc_rxq()
2325 rspq->next_intr_params = rspq->intr_params; t4vf_sge_alloc_rxq()
2326 rspq->cntxt_id = be16_to_cpu(rpl.iqid); t4vf_sge_alloc_rxq()
2327 rspq->bar2_addr = bar2_address(adapter, t4vf_sge_alloc_rxq()
2328 rspq->cntxt_id, t4vf_sge_alloc_rxq()
2330 &rspq->bar2_qid); t4vf_sge_alloc_rxq()
2331 rspq->abs_id = be16_to_cpu(rpl.physiqid); t4vf_sge_alloc_rxq()
2332 rspq->size--; /* subtract status entry */ t4vf_sge_alloc_rxq()
2333 rspq->adapter = adapter; t4vf_sge_alloc_rxq()
2334 rspq->netdev = dev; t4vf_sge_alloc_rxq()
2335 rspq->handler = hnd; t4vf_sge_alloc_rxq()
2338 rspq->offset = fl ? 0 : -1; t4vf_sge_alloc_rxq()
2368 if (rspq->desc) { t4vf_sge_alloc_rxq()
2369 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len, t4vf_sge_alloc_rxq()
2370 rspq->desc, rspq->phys_addr); t4vf_sge_alloc_rxq()
2371 rspq->desc = NULL; t4vf_sge_alloc_rxq()
2505 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, free_rspq_fl() argument
2512 rspq->cntxt_id, flid, 0xffff); free_rspq_fl()
2513 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, free_rspq_fl()
2514 rspq->desc, rspq->phys_addr); free_rspq_fl()
2515 netif_napi_del(&rspq->napi); free_rspq_fl()
2516 rspq->netdev = NULL; free_rspq_fl()
2517 rspq->cntxt_id = 0; free_rspq_fl()
2518 rspq->abs_id = 0; free_rspq_fl()
2519 rspq->desc = NULL; free_rspq_fl()
2549 if (rxq->rspq.desc) t4vf_free_sge_resources()
2550 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); t4vf_free_sge_resources()
1680 is_new_response(const struct rsp_ctrl *rc, const struct sge_rspq *rspq) is_new_response() argument
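The cxgb4vf hits center on a generation-bit ring: each response descriptor carries a gen bit, is_new_response() compares it against the queue's expected gen, and rspq_next() flips the expectation when cidx wraps, so the consumer never needs to read a producer index. A compact standalone model of that protocol (fields renamed; not the driver's descriptor layout):

#include <stdio.h>

#define Q_SIZE 4

struct desc { unsigned gen : 1; int payload; };

struct rspq {
    struct desc ring[Q_SIZE];
    unsigned cidx, gen;                   /* consumer index, expected gen */
};

/* is_new_response(): a slot is valid once hardware wrote our current gen. */
static int is_new(const struct rspq *q)
{
    return q->ring[q->cidx].gen == q->gen;
}

/* rspq_next(): advance, flipping the expected gen bit on wrap-around. */
static void next(struct rspq *q)
{
    if (++q->cidx == Q_SIZE) {
        q->cidx = 0;
        q->gen ^= 1;
    }
}

int main(void)
{
    struct rspq q = { .gen = 1 };
    /* "hardware" posts two responses carrying the current generation */
    q.ring[0] = (struct desc){ .gen = 1, .payload = 10 };
    q.ring[1] = (struct desc){ .gen = 1, .payload = 20 };

    while (is_new(&q)) {
        printf("response %d\n", q.ring[q.cidx].payload);
        next(&q);
    }
    return 0;
}

The flip on wrap is why stale descriptors from the previous pass never look new: after a full lap, old entries still hold the prior generation value.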
cxgb4vf_main.c 342 &s->ethrxq[rxq].rspq); for_each_ethrxq()
351 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
368 &s->ethrxq[rxq].rspq); free_msix_queue_irqs()
374 static void qenable(struct sge_rspq *rspq) qenable() argument
376 napi_enable(&rspq->napi); qenable()
382 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, qenable()
384 SEINTARM_V(rspq->intr_params) | qenable()
385 INGRESSQID_V(rspq->cntxt_id)); qenable()
397 qenable(&s->ethrxq[rxq].rspq); enable_rx()
421 napi_disable(&s->ethrxq[rxq].rspq.napi); quiesce_rx()
428 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, fwevtq_handler() argument
434 struct adapter *adapter = rspq->adapter; fwevtq_handler()
578 err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, for_each_port()
590 rxq->rspq.idx = qs; for_each_port()
599 s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
609 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; for_each_port()
651 rss[qs] = rxq[qs].rspq.abs_id; for_each_port()
678 rxq[0].rspq.abs_id; for_each_port()
1022 const struct sge_rspq *rspq) qtimer_val()
1024 unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params); qtimer_val()
1034 * @rspq: the RX response queue
1042 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, set_rxq_intr_params() argument
1065 if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { set_rxq_intr_params()
1069 FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); set_rxq_intr_params()
1074 rspq->pktcnt_idx = pktcnt_idx; set_rxq_intr_params()
1089 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) | set_rxq_intr_params()
1213 t4vf_sge_intr_msix(0, &rxq->rspq); cxgb4vf_poll_controller()
1389 rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; cxgb4vf_get_ringparam()
1422 s->ethrxq[qs].rspq.size = rp->rx_mini_pending; cxgb4vf_set_ringparam()
1438 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; cxgb4vf_get_coalesce() local
1440 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); cxgb4vf_get_coalesce()
1442 ((rspq->intr_params & QINTR_CNT_EN_F) cxgb4vf_get_coalesce()
1443 ? adapter->sge.counter_val[rspq->pktcnt_idx] cxgb4vf_get_coalesce()
1460 &adapter->sge.ethrxq[pi->first_qset].rspq, cxgb4vf_set_coalesce()
1759 (rxq[qs].rspq.netdev sge_qinfo_show()
1760 ? rxq[qs].rspq.netdev->name sge_qinfo_show()
1763 (rxq[qs].rspq.netdev sge_qinfo_show()
1765 netdev_priv(rxq[qs].rspq.netdev))->port_id sge_qinfo_show()
1772 R("RspQ ID:", rspq.abs_id); sge_qinfo_show()
1773 R("RspQ size:", rspq.size); sge_qinfo_show()
1774 R("RspQE size:", rspq.iqe_len); sge_qinfo_show()
1775 S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq)); sge_qinfo_show()
1777 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); sge_qinfo_show()
1778 R("RspQ CIdx:", rspq.cidx); sge_qinfo_show()
1779 R("RspQ Gen:", rspq.gen); sge_qinfo_show()
1916 (rxq[qs].rspq.netdev sge_qstats_show()
1917 ? rxq[qs].rspq.netdev->name sge_qstats_show()
1919 R3("u", "RspQNullInts:", rspq.unhandled_irqs); sge_qstats_show()
2392 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx, init_rspq() argument
2396 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) | init_rspq()
2399 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS init_rspq()
2402 rspq->iqe_len = iqe_size; init_rspq()
2403 rspq->size = size; init_rspq()
2475 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
1021 qtimer_val(const struct adapter *adapter, const struct sge_rspq *rspq) qtimer_val() argument
t4vf_hw.c 923 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
924 * @nrspq: number of values in @rspq
930 * The caller must ensure the values in @rspq are in the range 0..1023.
933 int start, int n, const u16 *rspq, int nrspq) t4vf_config_rss_range()
935 const u16 *rsp = rspq; t4vf_config_rss_range()
936 const u16 *rsp_end = rspq+nrspq; t4vf_config_rss_range()
995 rsp = rspq; t4vf_config_rss_range()
932 t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, int start, int n, const u16 *rspq, int nrspq) t4vf_config_rss_range() argument
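t4vf_config_rss_range() fills an n-entry slice of the RSS indirection table from nrspq queue IDs, cycling back to the start of @rspq when the list runs out (the rsp = rspq reset visible above). Stripped of the firmware-command batching, the cycling is plain modular iteration; a sketch:

#include <stdio.h>
#include <stdint.h>

/* Fill table[0..n) by cycling through rspq[0..nrspq), as the rsp/rsp_end
 * pointer walk in t4vf_config_rss_range() does. */
static void fill_rss(uint16_t *table, int n, const uint16_t *rspq, int nrspq)
{
    const uint16_t *rsp = rspq, *rsp_end = rspq + nrspq;

    for (int i = 0; i < n; i++) {
        table[i] = *rsp++;
        if (rsp == rsp_end)
            rsp = rspq;                   /* wrap back to the first queue */
    }
}

int main(void)
{
    uint16_t ids[] = { 3, 7 }, table[5];
    fill_rss(table, 5, ids, 2);
    for (int i = 0; i < 5; i++)
        printf("%u ", (unsigned)table[i]); /* prints: 3 7 3 7 3 */
    printf("\n");
    return 0;
}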
adapter.h 211 struct sge_rspq rspq; /* Response Queue */ member in struct:sge_eth_rxq
/linux-4.4.14/drivers/staging/slicoss/
slicoss.c 1187 struct slic_rspqueue *rspq = &adapter->rspqueue; slic_rspqueue_free() local
1189 for (i = 0; i < rspq->num_pages; i++) { slic_rspqueue_free()
1190 if (rspq->vaddr[i]) { slic_rspqueue_free()
1192 rspq->vaddr[i], rspq->paddr[i]); slic_rspqueue_free()
1194 rspq->vaddr[i] = NULL; slic_rspqueue_free()
1195 rspq->paddr[i] = 0; slic_rspqueue_free()
1197 rspq->offset = 0; slic_rspqueue_free()
1198 rspq->pageindex = 0; slic_rspqueue_free()
1199 rspq->rspbuf = NULL; slic_rspqueue_free()
1205 struct slic_rspqueue *rspq = &adapter->rspqueue; slic_rspqueue_init() local
1209 memset(rspq, 0, sizeof(struct slic_rspqueue)); slic_rspqueue_init()
1211 rspq->num_pages = SLIC_RSPQ_PAGES_GB; slic_rspqueue_init()
1213 for (i = 0; i < rspq->num_pages; i++) { slic_rspqueue_init()
1214 rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev, slic_rspqueue_init()
1216 &rspq->paddr[i]); slic_rspqueue_init()
1217 if (!rspq->vaddr[i]) { slic_rspqueue_init()
1226 (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE), slic_rspqueue_init()
1230 (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE), slic_rspqueue_init()
1235 rspq->offset = 0; slic_rspqueue_init()
1236 rspq->pageindex = 0; slic_rspqueue_init()
1237 rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0]; slic_rspqueue_init()
1243 struct slic_rspqueue *rspq = &adapter->rspqueue; slic_rspqueue_getnext() local
1246 if (!(rspq->rspbuf->status)) slic_rspqueue_getnext()
1249 buf = rspq->rspbuf; slic_rspqueue_getnext()
1250 if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) { slic_rspqueue_getnext()
1251 rspq->rspbuf++; slic_rspqueue_getnext()
1254 (rspq->paddr[rspq->pageindex] | SLIC_RSPQ_BUFSINPAGE), slic_rspqueue_getnext()
1256 rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages; slic_rspqueue_getnext()
1257 rspq->offset = 0; slic_rspqueue_getnext()
1258 rspq->rspbuf = (struct slic_rspbuf *) slic_rspqueue_getnext()
1259 rspq->vaddr[rspq->pageindex]; slic_rspqueue_getnext()
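The slicoss response queue spans several DMA pages: slic_rspqueue_getnext() returns the current buffer once its status word goes nonzero, then steps to the next buffer, hopping to the next page (modulo num_pages) when the current one is exhausted. A compact model of that cursor walk, with two static pages standing in for the pci_zalloc_consistent() allocations:

#include <stdio.h>

#define NUM_PAGES     2
#define BUFS_PER_PAGE 4                   /* SLIC_RSPQ_BUFSINPAGE stand-in */

struct rspbuf { unsigned status; };       /* nonzero once hardware filled it */

struct rspqueue {
    struct rspbuf *vaddr[NUM_PAGES];      /* one DMA page per slot */
    unsigned pageindex, offset;
    struct rspbuf *rspbuf;                /* current buffer cursor */
};

/* slic_rspqueue_getnext() pattern: return the current buffer if hardware
 * marked it done, then advance, hopping to the next page on overflow. */
static struct rspbuf *getnext(struct rspqueue *q)
{
    struct rspbuf *buf;

    if (!q->rspbuf->status)
        return NULL;                      /* nothing posted yet */

    buf = q->rspbuf;
    if (++q->offset < BUFS_PER_PAGE) {
        q->rspbuf++;
    } else {
        q->pageindex = (q->pageindex + 1) % NUM_PAGES;
        q->offset = 0;
        q->rspbuf = q->vaddr[q->pageindex];
        /* the real driver also hands the drained page back to the card */
    }
    return buf;
}

int main(void)
{
    static struct rspbuf pages[NUM_PAGES][BUFS_PER_PAGE];
    struct rspqueue q = { .vaddr = { pages[0], pages[1] }, .rspbuf = pages[0] };

    pages[0][0].status = 1;               /* pretend one response landed */
    printf("%s\n", getnext(&q) ? "got response" : "empty");
    printf("%s\n", getnext(&q) ? "got response" : "empty");
    return 0;
}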
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_debugfs.c 2285 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); sge_qinfo_show()
2300 R("RspQ ID:", rspq.abs_id); sge_qinfo_show()
2301 R("RspQ size:", rspq.size); sge_qinfo_show()
2302 R("RspQE size:", rspq.iqe_len); sge_qinfo_show()
2303 R("RspQ CIDX:", rspq.cidx); sge_qinfo_show()
2304 R("RspQ Gen:", rspq.gen); sge_qinfo_show()
2305 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); sge_qinfo_show()
2307 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); sge_qinfo_show()
2343 R("RspQ ID:", rspq.abs_id); sge_qinfo_show()
2344 R("RspQ size:", rspq.size); sge_qinfo_show()
2345 R("RspQE size:", rspq.iqe_len); sge_qinfo_show()
2346 R("RspQ CIDX:", rspq.cidx); sge_qinfo_show()
2347 R("RspQ Gen:", rspq.gen); sge_qinfo_show()
2348 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); sge_qinfo_show()
2350 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); sge_qinfo_show()
2371 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); sge_qinfo_show()
2372 R("RspQ ID:", rspq.abs_id); sge_qinfo_show()
2373 R("RspQ size:", rspq.size); sge_qinfo_show()
2374 R("RspQE size:", rspq.iqe_len); sge_qinfo_show()
2375 R("RspQ CIDX:", rspq.cidx); sge_qinfo_show()
2376 R("RspQ Gen:", rspq.gen); sge_qinfo_show()
2377 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); sge_qinfo_show()
2379 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); sge_qinfo_show()
2399 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); sge_qinfo_show()
2400 R("RspQ ID:", rspq.abs_id); sge_qinfo_show()
2401 R("RspQ size:", rspq.size); sge_qinfo_show()
2402 R("RspQE size:", rspq.iqe_len); sge_qinfo_show()
2403 R("RspQ CIDX:", rspq.cidx); sge_qinfo_show()
2404 R("RspQ Gen:", rspq.gen); sge_qinfo_show()
2405 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); sge_qinfo_show()
2407 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); sge_qinfo_show()
cxgb4_main.c 695 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); uldrx_handler()
797 &s->ethrxq[ethqidx].rspq); for_each_ethrxq()
806 &s->ofldrxq[ofldqidx].rspq); for_each_ofldrxq()
815 &s->rdmarxq[rdmaqidx].rspq); for_each_rdmarxq()
824 &s->rdmaciq[rdmaciqqidx].rspq); for_each_rdmaciq()
834 &s->rdmaciq[rdmaciqqidx].rspq);
837 &s->rdmarxq[rdmaqidx].rspq);
840 &s->ofldrxq[ofldqidx].rspq);
843 &s->ethrxq[ethqidx].rspq);
855 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); free_msix_queue_irqs()
857 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); free_msix_queue_irqs()
859 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); free_msix_queue_irqs()
861 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq); free_msix_queue_irqs()
887 rss[i] = rxq[*queues].rspq.abs_id; cxgb4_write_rss()
1009 err = t4_sge_alloc_rxq(adap, &q->rspq, false, alloc_ofld_rxqs()
1017 ids[i] = q->rspq.abs_id; alloc_ofld_rxqs()
1077 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, for_each_port()
1084 q->rspq.idx = j; for_each_port()
1122 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't for_each_port()
1127 s->rdmarxq[i].rspq.cntxt_id); for_each_port()
1136 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
3069 t4_sge_intr_msix(0, &rx->rspq); cxgb_netpoll()
4373 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4389 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4390 r->rspq.uld = CXGB4_ULD_ISCSI;
4397 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
4398 r->rspq.uld = CXGB4_ULD_RDMA;
4411 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
4412 r->rspq.uld = CXGB4_ULD_RDMA;
cxgb4_ethtool.c 705 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; get_sge_param()
730 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; set_sge_param()
752 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt); set_rx_intr_params()
767 q->rspq.adaptive_rx = adaptive_rx; set_adaptive_rx_setting()
778 return q->rspq.adaptive_rx; get_adaptive_rx_setting()
792 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; get_coalesce()
sge.c 1848 struct adapter *adapter = rxq->rspq.adap; do_gro()
1854 skb = napi_get_frags(&rxq->rspq.napi); do_gro()
1866 skb_record_rx_queue(skb, rxq->rspq.idx); do_gro()
1867 skb_mark_napi_id(skb, &rxq->rspq.napi); do_gro()
1872 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) do_gro()
1880 ret = napi_gro_frags(&rxq->rspq.napi); do_gro()
1903 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); t4_ethrx_handler()
2059 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); process_responses()
2355 if (napi_reschedule(&rxq->rspq.napi)) sge_rx_timer_cb()
2855 if (q->rspq.desc) t4_free_ofld_rxqs()
2856 free_rspq_fl(adap, &q->rspq, t4_free_ofld_rxqs()
2874 if (eq->rspq.desc) t4_free_sge_resources()
2875 free_rspq_fl(adap, &eq->rspq, t4_free_sge_resources()
cxgb4.h 560 struct sge_rspq rspq; member in struct:sge_eth_rxq
573 struct sge_rspq rspq; member in struct:sge_ofld_rxq
1285 int start, int n, const u16 *rspq, unsigned int nrspq);
t4_hw.c 4453 * @rspq: values for the response queue lookup table
4454 * @nrspq: number of values in @rspq
4460 * The caller must ensure the values in @rspq are in the range allowed for
4464 int start, int n, const u16 *rspq, unsigned int nrspq) t4_config_rss_range()
4467 const u16 *rsp = rspq; t4_config_rss_range()
4468 const u16 *rsp_end = rspq + nrspq; t4_config_rss_range()
4493 rsp = rspq; t4_config_rss_range()
4496 rsp = rspq; t4_config_rss_range()
4499 rsp = rspq; t4_config_rss_range()
4463 t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq) t4_config_rss_range() argument
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/
sge.c 174 return container_of(q, struct sge_qset, rspq); rspq_to_qset()
652 memset(&q->rspq, 0, sizeof(q->rspq)); t3_reset_qset()
707 if (q->rspq.desc) { t3_free_qset()
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); t3_free_qset()
712 q->rspq.size * sizeof(struct rsp_desc), t3_free_qset()
713 q->rspq.desc, q->rspq.phys_addr); t3_free_qset()
728 qs->rspq.cntxt_id = id; init_qset_cntxt()
1833 struct sge_rspq *q = &qs->rspq; ofld_poll()
2258 struct sge_rspq *q = &qs->rspq; process_responses()
2433 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | napi_rx_handler()
2434 V_NEWTIMER(qs->rspq.next_holdoff) | napi_rx_handler()
2435 V_NEWINDEX(qs->rspq.cidx)); napi_rx_handler()
2465 struct sge_rspq *q = &qs->rspq; process_pure_responses()
2544 struct sge_rspq *q = &qs->rspq; t3_sge_intr_msix()
2562 struct sge_rspq *q = &qs->rspq; t3_sge_intr_msix_napi()
2582 struct sge_rspq *q = &adap->sge.qs[0].rspq; t3_intr_msi()
2594 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; t3_intr_msi()
2611 struct sge_rspq *q = &qs->rspq; rspq_check_napi()
2632 struct sge_rspq *q = &adap->sge.qs[0].rspq; t3_intr_msi_napi()
2670 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; t3_intr()
2671 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; t3_intr()
2708 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; t3b_intr()
2725 process_responses_gts(adap, &adap->sge.qs[1].rspq); t3b_intr()
2743 struct sge_rspq *q0 = &qs0->rspq; t3b_intr_napi()
2905 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; sge_timer_rx()
2916 if (status & (1 << qs->rspq.cntxt_id)) { sge_timer_rx()
2917 qs->rspq.starved++; sge_timer_rx()
2918 if (qs->rspq.credits) { sge_timer_rx()
2919 qs->rspq.credits--; sge_timer_rx()
2920 refill_rspq(adap, &qs->rspq, 1); sge_timer_rx()
2921 qs->rspq.restarted++; sge_timer_rx()
2923 1 << qs->rspq.cntxt_id); sge_timer_rx()
2949 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ t3_update_qset_coalesce()
2950 qs->rspq.polling = p->polling; t3_update_qset_coalesce()
2996 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, t3_sge_alloc_qset()
2998 &q->rspq.phys_addr, NULL); t3_sge_alloc_qset()
2999 if (!q->rspq.desc) t3_sge_alloc_qset()
3031 q->rspq.gen = 1; t3_sge_alloc_qset()
3032 q->rspq.size = p->rspq_size; t3_sge_alloc_qset()
3033 spin_lock_init(&q->rspq.lock); t3_sge_alloc_qset()
3034 skb_queue_head_init(&q->rspq.rx_queue); t3_sge_alloc_qset()
3062 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, t3_sge_alloc_qset()
3063 q->rspq.phys_addr, q->rspq.size, t3_sge_alloc_qset()
3125 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); t3_sge_alloc_qset()
3127 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | t3_sge_alloc_qset()
3128 V_NEWTIMER(q->rspq.holdoff_tmr)); t3_sge_alloc_qset()
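The cxgb3 hits end with the GTS (Global Timer/Status) re-arm: after polling, the driver packs the queue context id, a new holdoff timer, and the updated consumer index into a single register write (the V_RSPQ | V_NEWTIMER | V_NEWINDEX lines above). A sketch of that field-packing idiom with made-up shift values; the real S_RSPQ/S_NEWTIMER/S_NEWINDEX constants live in cxgb3's register headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field offsets, standing in for the driver's constants. */
#define S_NEWINDEX 0
#define S_NEWTIMER 16
#define S_RSPQ     24

#define V_NEWINDEX(x) ((uint32_t)(x) << S_NEWINDEX)
#define V_NEWTIMER(x) ((uint32_t)(x) << S_NEWTIMER)
#define V_RSPQ(x)     ((uint32_t)(x) << S_RSPQ)

int main(void)
{
    unsigned cntxt_id = 3, next_holdoff = 5, cidx = 100;

    /* One write re-arms the interrupt and acks the consumed entries. */
    uint32_t gts = V_RSPQ(cntxt_id) | V_NEWTIMER(next_holdoff) |
                   V_NEWINDEX(cidx);
    printf("A_SG_GTS <- 0x%08x\n", (unsigned)gts);
    return 0;
}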
adapter.h 203 struct sge_rspq rspq; member in struct:sge_qset
common.h 317 unsigned int polling; /* polling/interrupt service for rspq */
699 const u8 * cpus, const u16 *rspq);
748 unsigned int size, int rspq, int ovfl_mode,
cxgb3_main.c 412 rspq.polling), 0, for_each_port()
448 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { await_mgmt_replies()
461 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; init_tp_parity()
1170 struct sge_rspq *q = &adap->sge.qs[i].rspq; t3_synchronize_rx()
1276 adap->sge.qs[0].rspq. cxgb_up()
2601 t3_intr_handler(adapter, qs->rspq.polling) (0, source); cxgb_netpoll()
t3_hw.c 2320 * @rspq: response queue for async notifications
2330 unsigned int size, int rspq, int ovfl_mode, t3_sge_init_cqcntxt()
2343 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) | t3_sge_init_cqcntxt()
2493 * @rspq: values for the response queue lookup table (0xffff terminated)
2495 * Programs the receive packet steering logic. @cpus and @rspq provide
2501 const u8 * cpus, const u16 *rspq) t3_config_rss()
2517 if (rspq) t3_config_rss()
2520 (i << 16) | rspq[q_idx++]); t3_config_rss()
2521 if (rspq[q_idx] == 0xffff) t3_config_rss()
2329 t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, unsigned int size, int rspq, int ovfl_mode, unsigned int credits, unsigned int credit_thres) t3_sge_init_cqcntxt() argument
2500 t3_config_rss(struct adapter *adapter, unsigned int rss_config, const u8 * cpus, const u16 *rspq) t3_config_rss() argument
t3_cpl.h 1441 __u8 rspq:3; member in struct:cpl_rdma_terminate
1445 __u8 rspq:3; member in struct:cpl_rdma_terminate
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
device.c 1082 * rss_header from the rspq descriptor (1 flit) copy_gl_to_skb_pkt()
1083 * cpl_rx_pkt struct from the rspq descriptor (2 flits) copy_gl_to_skb_pkt()

Completed in 676 milliseconds