Lines matching refs: q
227 static inline unsigned int txq_avail(const struct sge_txq *q) in txq_avail() argument
229 return q->size - 1 - q->in_use; in txq_avail()
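The occupancy check in txq_avail() can be modelled on its own. Below is a minimal standalone sketch in plain C; txq_model is a hypothetical stand-in for struct sge_txq, not a driver type. One descriptor slot is always held back, matching the size - 1 - in_use computation above.

/* Hypothetical, simplified model of an SGE TX ring, for illustration only. */
struct txq_model {
        unsigned int size;      /* total descriptor slots in the ring */
        unsigned int in_use;    /* slots currently handed to hardware */
};

/* Mirrors txq_avail(): report free slots, keeping one slot in reserve. */
static inline unsigned int txq_model_avail(const struct txq_model *q)
{
        return q->size - 1 - q->in_use;
}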
322 const struct ulptx_sgl *sgl, const struct sge_txq *q) in unmap_sgl() argument
341 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { in unmap_sgl()
347 } else if ((u8 *)p == (u8 *)q->stat) { in unmap_sgl()
348 p = (const struct ulptx_sge_pair *)q->desc; in unmap_sgl()
350 } else if ((u8 *)p + 8 == (u8 *)q->stat) { in unmap_sgl()
351 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
359 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
371 if ((u8 *)p == (u8 *)q->stat) in unmap_sgl()
372 p = (const struct ulptx_sge_pair *)q->desc; in unmap_sgl()
373 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : in unmap_sgl()
374 *(const __be64 *)q->desc; in unmap_sgl()
390 static void free_tx_desc(struct adapter *adap, struct sge_txq *q, in free_tx_desc() argument
394 unsigned int cidx = q->cidx; in free_tx_desc()
397 d = &q->sdesc[cidx]; in free_tx_desc()
401 unmap_sgl(dev, d->skb, d->sgl, q); in free_tx_desc()
406 if (++cidx == q->size) { in free_tx_desc()
408 d = q->sdesc; in free_tx_desc()
411 q->cidx = cidx; in free_tx_desc()
417 static inline int reclaimable(const struct sge_txq *q) in reclaimable() argument
419 int hw_cidx = ntohs(q->stat->cidx); in reclaimable()
420 hw_cidx -= q->cidx; in reclaimable()
421 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; in reclaimable()
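reclaimable() measures how far the hardware consumer index (read from the status page as ntohs(q->stat->cidx)) has run ahead of the software cidx, modulo the ring size. A small self-contained sketch of that modular distance, with hypothetical parameter names:

/* Distance from sw_cidx forward to hw_cidx on a ring of `size` slots;
 * a negative raw difference means the hardware index has wrapped. */
static inline int ring_distance(unsigned int hw_cidx, unsigned int sw_cidx,
                                unsigned int size)
{
        int d = (int)hw_cidx - (int)sw_cidx;

        return d < 0 ? d + (int)size : d;
}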
434 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, in reclaim_completed_tx() argument
437 int avail = reclaimable(q); in reclaim_completed_tx()
447 free_tx_desc(adap, q, avail, unmap); in reclaim_completed_tx()
448 q->in_use -= avail; in reclaim_completed_tx()
492 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) in free_rx_bufs() argument
495 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in free_rx_bufs()
503 if (++q->cidx == q->size) in free_rx_bufs()
504 q->cidx = 0; in free_rx_bufs()
505 q->avail--; in free_rx_bufs()
520 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) in unmap_rx_buf() argument
522 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in unmap_rx_buf()
528 if (++q->cidx == q->size) in unmap_rx_buf()
529 q->cidx = 0; in unmap_rx_buf()
530 q->avail--; in unmap_rx_buf()
533 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
536 if (q->pend_cred >= 8) { in ring_fl_db()
538 val = PIDX_V(q->pend_cred / 8); in ring_fl_db()
540 val = PIDX_T5_V(q->pend_cred / 8) | in ring_fl_db()
549 if (unlikely(q->bar2_addr == NULL)) { in ring_fl_db()
551 val | QID_V(q->cntxt_id)); in ring_fl_db()
553 writel(val | QID_V(q->bar2_qid), in ring_fl_db()
554 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
561 q->pend_cred &= 7; in ring_fl_db()
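ring_fl_db() posts free-list credits to hardware only in whole units of 8 buffers (PIDX_V(q->pend_cred / 8), with a PIDX_T5_V variant on newer chips), and the leftover q->pend_cred &= 7 is carried to the next call. A minimal sketch of just that batching rule, leaving out the doorbell register write and the QID/BAR2 selection:

/* Hypothetical helper: convert pending free-list credits into doorbell
 * units of 8 buffers and keep the remainder pending. */
static unsigned int fl_doorbell_units(unsigned int *pend_cred)
{
        unsigned int units = *pend_cred / 8;    /* value a PIDX field would carry */

        if (units)
                *pend_cred &= 7;                /* remainder waits for the next ring */
        return units;
}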
586 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, in refill_fl() argument
592 unsigned int cred = q->avail; in refill_fl()
593 __be64 *d = &q->desc[q->pidx]; in refill_fl()
594 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
609 q->large_alloc_failed++; in refill_fl()
626 q->avail++; in refill_fl()
627 if (++q->pidx == q->size) { in refill_fl()
628 q->pidx = 0; in refill_fl()
629 sd = q->sdesc; in refill_fl()
630 d = q->desc; in refill_fl()
639 q->alloc_failed++; in refill_fl()
654 q->avail++; in refill_fl()
655 if (++q->pidx == q->size) { in refill_fl()
656 q->pidx = 0; in refill_fl()
657 sd = q->sdesc; in refill_fl()
658 d = q->desc; in refill_fl()
662 out: cred = q->avail - cred; in refill_fl()
663 q->pend_cred += cred; in refill_fl()
664 ring_fl_db(adap, q); in refill_fl()
666 if (unlikely(fl_starving(adap, q))) { in refill_fl()
668 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
852 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, in write_sgl() argument
881 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in write_sgl()
894 if (unlikely((u8 *)end > (u8 *)q->stat)) { in write_sgl()
895 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in write_sgl()
899 part1 = (u8 *)end - (u8 *)q->stat; in write_sgl()
900 memcpy(q->desc, (u8 *)buf + part0, part1); in write_sgl()
901 end = (void *)q->desc + part1; in write_sgl()
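When the scatter-gather list would run past the status page (end > q->stat), write_sgl() assembles the trailing part in a bounce buffer and copies it back in two pieces: part0 up to q->stat and part1 at the start of q->desc. A simplified, self-contained sketch of that wrap-around copy, with hypothetical ring_base/ring_end parameters standing in for q->desc and q->stat:

#include <string.h>

/* Copy `len` bytes to `dst`, wrapping to ring_base if the copy would
 * cross ring_end; returns the position just past the copied data. */
static void *copy_with_wrap(void *dst, const void *src, size_t len,
                            void *ring_base, void *ring_end)
{
        size_t part0 = (size_t)((char *)ring_end - (char *)dst);

        if (len <= part0) {
                memcpy(dst, src, len);
                return (char *)dst + len;
        }
        memcpy(dst, src, part0);                        /* up to the status page */
        memcpy(ring_base, (const char *)src + part0,    /* remainder at ring start */
               len - part0);
        return (char *)ring_base + (len - part0);
}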
931 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) in ring_tx_db() argument
938 if (unlikely(q->bar2_addr == NULL)) { in ring_tx_db()
945 spin_lock_irqsave(&q->db_lock, flags); in ring_tx_db()
946 if (!q->db_disabled) in ring_tx_db()
948 QID_V(q->cntxt_id) | val); in ring_tx_db()
950 q->db_pidx_inc += n; in ring_tx_db()
951 q->db_pidx = q->pidx; in ring_tx_db()
952 spin_unlock_irqrestore(&q->db_lock, flags); in ring_tx_db()
968 if (n == 1 && q->bar2_qid == 0) { in ring_tx_db()
969 int index = (q->pidx in ring_tx_db()
970 ? (q->pidx - 1) in ring_tx_db()
971 : (q->size - 1)); in ring_tx_db()
972 u64 *wr = (u64 *)&q->desc[index]; in ring_tx_db()
975 (q->bar2_addr + SGE_UDB_WCDOORBELL), in ring_tx_db()
978 writel(val | QID_V(q->bar2_qid), in ring_tx_db()
979 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_tx_db()
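In the write-combining path of ring_tx_db() (the n == 1 && q->bar2_qid == 0 case above), the descriptor copied through BAR2 is the one just behind the already-advanced producer index, wrapping to the last slot when pidx is 0. The index arithmetic in isolation, as a hedged sketch:

/* Index of the most recently written descriptor, assuming pidx has
 * already been advanced past it (see txq_advance() below). */
static inline unsigned int last_written_desc(unsigned int pidx,
                                             unsigned int size)
{
        return pidx ? pidx - 1 : size - 1;
}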
1007 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, in inline_tx_skb() argument
1011 int left = (void *)q->stat - pos; in inline_tx_skb()
1021 skb_copy_bits(skb, left, q->desc, skb->len - left); in inline_tx_skb()
1022 pos = (void *)q->desc + (skb->len - left); in inline_tx_skb()
1078 static void eth_txq_stop(struct sge_eth_txq *q) in eth_txq_stop() argument
1080 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1081 q->q.stops++; in eth_txq_stop()
1084 static inline void txq_advance(struct sge_txq *q, unsigned int n) in txq_advance() argument
1086 q->in_use += n; in txq_advance()
1087 q->pidx += n; in txq_advance()
1088 if (q->pidx >= q->size) in txq_advance()
1089 q->pidx -= q->size; in txq_advance()
1139 struct sge_eth_txq *q; in t4_eth_xmit() local
1162 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in t4_eth_xmit()
1164 reclaim_completed_tx(adap, &q->q, true); in t4_eth_xmit()
1175 credits = txq_avail(&q->q) - ndesc; in t4_eth_xmit()
1178 eth_txq_stop(q); in t4_eth_xmit()
1190 q->mapping_err++; in t4_eth_xmit()
1196 eth_txq_stop(q); in t4_eth_xmit()
1200 wr = (void *)&q->q.desc[q->q.pidx]; in t4_eth_xmit()
1233 q->tso++; in t4_eth_xmit()
1234 q->tx_cso += ssi->gso_segs; in t4_eth_xmit()
1242 q->tx_cso++; in t4_eth_xmit()
1247 q->vlan_ins++; in t4_eth_xmit()
1263 inline_tx_skb(skb, &q->q, cpl + 1); in t4_eth_xmit()
1268 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, in t4_eth_xmit()
1272 last_desc = q->q.pidx + ndesc - 1; in t4_eth_xmit()
1273 if (last_desc >= q->q.size) in t4_eth_xmit()
1274 last_desc -= q->q.size; in t4_eth_xmit()
1275 q->q.sdesc[last_desc].skb = skb; in t4_eth_xmit()
1276 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); in t4_eth_xmit()
1279 txq_advance(&q->q, ndesc); in t4_eth_xmit()
1281 ring_tx_db(adap, &q->q, ndesc); in t4_eth_xmit()
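t4_eth_xmit() records the skb and its gather list on the last descriptor of the work request (last_desc above) so free_tx_desc()/unmap_sgl() can find them at reclaim time; the index wraps when pidx + ndesc - 1 passes the end of the ring. A trivial standalone sketch of that wrap, with hypothetical names:

/* Index of the final descriptor of a work request occupying `ndesc`
 * slots starting at `pidx` on a ring of `size` slots. */
static inline unsigned int wr_last_desc(unsigned int pidx, unsigned int ndesc,
                                        unsigned int size)
{
        unsigned int last = pidx + ndesc - 1;

        return last >= size ? last - size : last;
}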
1293 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1295 int hw_cidx = ntohs(q->stat->cidx); in reclaim_completed_tx_imm()
1296 int reclaim = hw_cidx - q->cidx; in reclaim_completed_tx_imm()
1299 reclaim += q->size; in reclaim_completed_tx_imm()
1301 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1302 q->cidx = hw_cidx; in reclaim_completed_tx_imm()
1326 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) in ctrlq_check_stop() argument
1328 reclaim_completed_tx_imm(&q->q); in ctrlq_check_stop()
1329 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in ctrlq_check_stop()
1331 q->q.stops++; in ctrlq_check_stop()
1332 q->full = 1; in ctrlq_check_stop()
1344 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) in ctrl_xmit() argument
1356 spin_lock(&q->sendq.lock); in ctrl_xmit()
1358 if (unlikely(q->full)) { in ctrl_xmit()
1360 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
1361 spin_unlock(&q->sendq.lock); in ctrl_xmit()
1365 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in ctrl_xmit()
1366 inline_tx_skb(skb, &q->q, wr); in ctrl_xmit()
1368 txq_advance(&q->q, ndesc); in ctrl_xmit()
1369 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) in ctrl_xmit()
1370 ctrlq_check_stop(q, wr); in ctrl_xmit()
1372 ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
1373 spin_unlock(&q->sendq.lock); in ctrl_xmit()
1389 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; in restart_ctrlq() local
1391 spin_lock(&q->sendq.lock); in restart_ctrlq()
1392 reclaim_completed_tx_imm(&q->q); in restart_ctrlq()
1393 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ in restart_ctrlq()
1395 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1403 spin_unlock(&q->sendq.lock); in restart_ctrlq()
1405 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in restart_ctrlq()
1406 inline_tx_skb(skb, &q->q, wr); in restart_ctrlq()
1410 txq_advance(&q->q, ndesc); in restart_ctrlq()
1411 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in restart_ctrlq()
1412 unsigned long old = q->q.stops; in restart_ctrlq()
1414 ctrlq_check_stop(q, wr); in restart_ctrlq()
1415 if (q->q.stops != old) { /* suspended anew */ in restart_ctrlq()
1416 spin_lock(&q->sendq.lock); in restart_ctrlq()
1421 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1424 spin_lock(&q->sendq.lock); in restart_ctrlq()
1426 q->full = 0; in restart_ctrlq()
1428 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1429 spin_unlock(&q->sendq.lock); in restart_ctrlq()
1492 static void txq_stop_maperr(struct sge_ofld_txq *q) in txq_stop_maperr() argument
1494 q->mapping_err++; in txq_stop_maperr()
1495 q->q.stops++; in txq_stop_maperr()
1496 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
1497 q->adap->sge.txq_maperr); in txq_stop_maperr()
1508 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) in ofldtxq_stop() argument
1513 q->q.stops++; in ofldtxq_stop()
1514 q->full = 1; in ofldtxq_stop()
1524 static void service_ofldq(struct sge_ofld_txq *q) in service_ofldq() argument
1532 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { in service_ofldq()
1537 spin_unlock(&q->sendq.lock); in service_ofldq()
1539 reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
1543 credits = txq_avail(&q->q) - ndesc; in service_ofldq()
1546 ofldtxq_stop(q, skb); in service_ofldq()
1548 pos = (u64 *)&q->q.desc[q->q.pidx]; in service_ofldq()
1550 inline_tx_skb(skb, &q->q, pos); in service_ofldq()
1551 else if (map_skb(q->adap->pdev_dev, skb, in service_ofldq()
1553 txq_stop_maperr(q); in service_ofldq()
1554 spin_lock(&q->sendq.lock); in service_ofldq()
1560 write_sgl(skb, &q->q, (void *)pos + hdr_len, in service_ofldq()
1564 skb->dev = q->adap->port[0]; in service_ofldq()
1567 last_desc = q->q.pidx + ndesc - 1; in service_ofldq()
1568 if (last_desc >= q->q.size) in service_ofldq()
1569 last_desc -= q->q.size; in service_ofldq()
1570 q->q.sdesc[last_desc].skb = skb; in service_ofldq()
1573 txq_advance(&q->q, ndesc); in service_ofldq()
1576 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1580 spin_lock(&q->sendq.lock); in service_ofldq()
1581 __skb_unlink(skb, &q->sendq); in service_ofldq()
1586 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1596 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) in ofld_xmit() argument
1599 spin_lock(&q->sendq.lock); in ofld_xmit()
1600 __skb_queue_tail(&q->sendq, skb); in ofld_xmit()
1601 if (q->sendq.qlen == 1) in ofld_xmit()
1602 service_ofldq(q); in ofld_xmit()
1603 spin_unlock(&q->sendq.lock); in ofld_xmit()
1615 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; in restart_ofldq() local
1617 spin_lock(&q->sendq.lock); in restart_ofldq()
1618 q->full = 0; /* the queue actually is completely empty now */ in restart_ofldq()
1619 service_ofldq(q); in restart_ofldq()
1620 spin_unlock(&q->sendq.lock); in restart_ofldq()
1843 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, in t4_ethrx_handler() argument
1849 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); in t4_ethrx_handler()
1850 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
1851 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
1858 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
1862 (q->netdev->features & NETIF_F_RXCSUM); in t4_ethrx_handler()
1864 !(cxgb_poll_busy_polling(q)) && in t4_ethrx_handler()
1865 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { in t4_ethrx_handler()
1878 skb->protocol = eth_type_trans(skb, q->netdev); in t4_ethrx_handler()
1879 skb_record_rx_queue(skb, q->idx); in t4_ethrx_handler()
1919 skb_mark_napi_id(skb, &q->napi); in t4_ethrx_handler()
1939 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, in restore_rx_bufs() argument
1945 if (q->cidx == 0) in restore_rx_bufs()
1946 q->cidx = q->size - 1; in restore_rx_bufs()
1948 q->cidx--; in restore_rx_bufs()
1949 d = &q->sdesc[q->cidx]; in restore_rx_bufs()
1952 q->avail++; in restore_rx_bufs()
1965 const struct sge_rspq *q) in is_new_response() argument
1967 return RSPD_GEN(r->type_gen) == q->gen; in is_new_response()
1976 static inline void rspq_next(struct sge_rspq *q) in rspq_next() argument
1978 q->cur_desc = (void *)q->cur_desc + q->iqe_len; in rspq_next()
1979 if (unlikely(++q->cidx == q->size)) { in rspq_next()
1980 q->cidx = 0; in rspq_next()
1981 q->gen ^= 1; in rspq_next()
1982 q->cur_desc = q->desc; in rspq_next()
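rspq_next() walks the response queue by iqe_len-sized entries and, on wrap, toggles the generation bit that is_new_response() compares against RSPD_GEN(r->type_gen), so entries left over from the previous pass around the ring are not mistaken for new ones. A self-contained sketch of that cursor, using a hypothetical rspq_model struct in place of struct sge_rspq:

#include <stdint.h>

struct rspq_model {
        uint8_t *desc, *cur_desc;       /* ring base / current entry */
        unsigned int cidx, size;        /* consumer index / entries per ring */
        unsigned int iqe_len;           /* bytes per response entry */
        uint8_t gen;                    /* generation bit expected for new entries */
};

static inline void rspq_model_next(struct rspq_model *q)
{
        q->cur_desc += q->iqe_len;
        if (++q->cidx == q->size) {     /* wrapped: flip the expected generation */
                q->cidx = 0;
                q->gen ^= 1;
                q->cur_desc = q->desc;
        }
}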
1999 static int process_responses(struct sge_rspq *q, int budget) in process_responses() argument
2004 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); in process_responses()
2005 struct adapter *adapter = q->adap; in process_responses()
2009 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_responses()
2010 if (!is_new_response(rc, q)) in process_responses()
2022 if (likely(q->offset > 0)) { in process_responses()
2023 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
2024 q->offset = 0; in process_responses()
2035 fp->offset = q->offset; in process_responses()
2040 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
2047 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
2056 ret = q->handler(q, q->cur_desc, &si); in process_responses()
2058 q->offset += ALIGN(fp->size, s->fl_align); in process_responses()
2062 ret = q->handler(q, q->cur_desc, NULL); in process_responses()
2064 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); in process_responses()
2069 q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); in process_responses()
2073 rspq_next(q); in process_responses()
2077 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) in process_responses()
2078 __refill_fl(q->adap, &rxq->fl); in process_responses()
2085 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); in cxgb_busy_poll() local
2089 if (!cxgb_poll_lock_poll(q)) in cxgb_busy_poll()
2092 work_done = process_responses(q, 4); in cxgb_busy_poll()
2094 q->next_intr_params = params; in cxgb_busy_poll()
2100 if (unlikely(!q->bar2_addr)) in cxgb_busy_poll()
2101 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in cxgb_busy_poll()
2102 val | INGRESSQID_V((u32)q->cntxt_id)); in cxgb_busy_poll()
2104 writel(val | INGRESSQID_V(q->bar2_qid), in cxgb_busy_poll()
2105 q->bar2_addr + SGE_UDB_GTS); in cxgb_busy_poll()
2109 cxgb_poll_unlock_poll(q); in cxgb_busy_poll()
2128 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); in napi_rx_handler() local
2132 if (!cxgb_poll_lock_napi(q)) in napi_rx_handler()
2135 work_done = process_responses(q, budget); in napi_rx_handler()
2140 timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params); in napi_rx_handler()
2142 if (q->adaptive_rx) { in napi_rx_handler()
2150 q->next_intr_params = QINTR_TIMER_IDX(timer_index) | in napi_rx_handler()
2152 params = q->next_intr_params; in napi_rx_handler()
2154 params = q->next_intr_params; in napi_rx_handler()
2155 q->next_intr_params = q->intr_params; in napi_rx_handler()
2165 if (unlikely(q->bar2_addr == NULL)) { in napi_rx_handler()
2166 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
2167 val | INGRESSQID_V((u32)q->cntxt_id)); in napi_rx_handler()
2169 writel(val | INGRESSQID_V(q->bar2_qid), in napi_rx_handler()
2170 q->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
2173 cxgb_poll_unlock_napi(q); in napi_rx_handler()
2182 struct sge_rspq *q = cookie; in t4_sge_intr_msix() local
2184 napi_schedule(&q->napi); in t4_sge_intr_msix()
2196 struct sge_rspq *q = &adap->sge.intrq; in process_intrq() local
2201 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_intrq()
2202 if (!is_new_response(rc, q)) in process_intrq()
2213 rspq_next(q); in process_intrq()
2216 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); in process_intrq()
2221 if (unlikely(q->bar2_addr == NULL)) { in process_intrq()
2223 val | INGRESSQID_V(q->cntxt_id)); in process_intrq()
2225 writel(val | INGRESSQID_V(q->bar2_qid), in process_intrq()
2226 q->bar2_addr + SGE_UDB_GTS); in process_intrq()
2386 struct sge_eth_txq *q = &s->ethtxq[i]; in sge_tx_timer_cb() local
2388 if (q->q.in_use && in sge_tx_timer_cb()
2389 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && in sge_tx_timer_cb()
2390 __netif_tx_trylock(q->txq)) { in sge_tx_timer_cb()
2391 int avail = reclaimable(&q->q); in sge_tx_timer_cb()
2397 free_tx_desc(adap, &q->q, avail, true); in sge_tx_timer_cb()
2398 q->q.in_use -= avail; in sge_tx_timer_cb()
2401 __netif_tx_unlock(q->txq); in sge_tx_timer_cb()
2555 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) in init_txq() argument
2557 q->cntxt_id = id; in init_txq()
2558 q->bar2_addr = bar2_address(adap, in init_txq()
2559 q->cntxt_id, in init_txq()
2561 &q->bar2_qid); in init_txq()
2562 q->in_use = 0; in init_txq()
2563 q->cidx = q->pidx = 0; in init_txq()
2564 q->stops = q->restarts = 0; in init_txq()
2565 q->stat = (void *)&q->desc[q->size]; in init_txq()
2566 spin_lock_init(&q->db_lock); in init_txq()
2567 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
2580 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
2582 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
2584 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
2586 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
2606 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
2610 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
2611 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
2614 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
2615 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
2619 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
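The TX ring layout ties the pieces above together: t4_sge_alloc_eth_txq() sizes the allocation as q->size descriptors plus s->stat_len extra bytes, and init_txq() then points q->stat at &q->desc[q->size], the status page from which reclaimable() and reclaim_completed_tx_imm() read the hardware cidx. A small sketch of that sizing, with tx_desc_model as a hypothetical stand-in for the 8-flit struct tx_desc:

#include <stdint.h>

struct tx_desc_model {          /* stand-in for struct tx_desc (8 flits) */
        uint64_t flit[8];
};

/* Number of descriptor-sized entries to allocate so that `stat_len`
 * bytes of status page follow the `size` real descriptors. */
static inline unsigned int txq_nentries(unsigned int size, unsigned int stat_len)
{
        return size + stat_len / (unsigned int)sizeof(struct tx_desc_model);
}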
2636 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
2638 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
2639 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
2641 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
2660 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
2666 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
2667 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
2671 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
2688 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ofld_txq()
2690 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_ofld_txq()
2692 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_ofld_txq()
2694 if (!txq->q.desc) in t4_sge_alloc_ofld_txq()
2712 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ofld_txq()
2716 kfree(txq->q.sdesc); in t4_sge_alloc_ofld_txq()
2717 txq->q.sdesc = NULL; in t4_sge_alloc_ofld_txq()
2720 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ofld_txq()
2721 txq->q.desc = NULL; in t4_sge_alloc_ofld_txq()
2725 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_ofld_txq()
2734 static void free_txq(struct adapter *adap, struct sge_txq *q) in free_txq() argument
2739 q->size * sizeof(struct tx_desc) + s->stat_len, in free_txq()
2740 q->desc, q->phys_addr); in free_txq()
2741 q->cntxt_id = 0; in free_txq()
2742 q->sdesc = NULL; in free_txq()
2743 q->desc = NULL; in free_txq()
2782 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) in t4_free_ofld_rxqs() argument
2784 for ( ; n; n--, q++) in t4_free_ofld_rxqs()
2785 if (q->rspq.desc) in t4_free_ofld_rxqs()
2786 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
2787 q->fl.size ? &q->fl : NULL); in t4_free_ofld_rxqs()
2807 if (etq->q.desc) { in t4_free_sge_resources()
2809 etq->q.cntxt_id); in t4_free_sge_resources()
2810 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
2811 kfree(etq->q.sdesc); in t4_free_sge_resources()
2812 free_txq(adap, &etq->q); in t4_free_sge_resources()
2823 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; in t4_free_sge_resources() local
2825 if (q->q.desc) { in t4_free_sge_resources()
2826 tasklet_kill(&q->qresume_tsk); in t4_free_sge_resources()
2828 q->q.cntxt_id); in t4_free_sge_resources()
2829 free_tx_desc(adap, &q->q, q->q.in_use, false); in t4_free_sge_resources()
2830 kfree(q->q.sdesc); in t4_free_sge_resources()
2831 __skb_queue_purge(&q->sendq); in t4_free_sge_resources()
2832 free_txq(adap, &q->q); in t4_free_sge_resources()
2840 if (cq->q.desc) { in t4_free_sge_resources()
2843 cq->q.cntxt_id); in t4_free_sge_resources()
2845 free_txq(adap, &cq->q); in t4_free_sge_resources()
2889 struct sge_ofld_txq *q = &s->ofldtxq[i]; in t4_sge_stop() local
2891 if (q->q.desc) in t4_sge_stop()
2892 tasklet_kill(&q->qresume_tsk); in t4_sge_stop()
2897 if (cq->q.desc) in t4_sge_stop()