Lines Matching refs:q

167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)  in fl_to_qset()  argument
169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
174 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
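
The three accessors above all recover the enclosing sge_qset from a pointer to one of its embedded queues via container_of. A minimal userspace sketch of that pattern follows; the struct names and fields here are simplified stand-ins, not the real sge_qset layout.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fl   { int credits; };
struct rspq { int credits; };

struct qset {
	struct rspq rspq;
	struct fl   fl[2];
};

static struct qset *rspq_to_qset_sketch(const struct rspq *q)
{
	return container_of(q, struct qset, rspq);
}

static struct qset *fl_to_qset_sketch(const struct fl *q, int qidx)
{
	/* The driver writes container_of(q, struct sge_qset, fl[qidx]);
	 * stepping back to fl[0] first keeps this sketch in standard C. */
	return container_of(q - qidx, struct qset, fl[0]);
}

int main(void)
{
	struct qset qs = { .rspq = { 7 }, .fl = { { 1 }, { 2 } } };

	printf("rspq  -> qset: %d\n", rspq_to_qset_sketch(&qs.rspq) == &qs);
	printf("fl[1] -> qset: %d\n", fl_to_qset_sketch(&qs.fl[1], 1) == &qs);
	return 0;
}
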
192 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
240 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
269 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, in free_tx_desc() argument
290 unsigned int cidx = q->cidx; in free_tx_desc()
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START; in free_tx_desc()
295 d = &q->sdesc[cidx]; in free_tx_desc()
299 unmap_skb(d->skb, q, cidx, pdev); in free_tx_desc()
306 if (++cidx == q->size) { in free_tx_desc()
308 d = q->sdesc; in free_tx_desc()
311 q->cidx = cidx; in free_tx_desc()
325 struct sge_txq *q, in reclaim_completed_tx() argument
328 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
332 free_tx_desc(adapter, q, reclaim); in reclaim_completed_tx()
333 q->cleaned += reclaim; in reclaim_completed_tx()
334 q->in_use -= reclaim; in reclaim_completed_tx()
336 return q->processed - q->cleaned; in reclaim_completed_tx()
345 static inline int should_restart_tx(const struct sge_txq *q) in should_restart_tx() argument
347 unsigned int r = q->processed - q->cleaned; in should_restart_tx()
349 return q->in_use - r < (q->size >> 1); in should_restart_tx()
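
The reclaim bookkeeping in reclaim_completed_tx() and should_restart_tx() is pure counter arithmetic: processed counts completions reported by the hardware, cleaned counts descriptors whose buffers have already been freed, and the queue may restart once less than half the ring would remain in use. A simplified single-threaded sketch (no DMA unmapping, no TX_RECLAIM_CHUNK limit, hypothetical struct):

#include <stdio.h>

struct txq_sketch {
	unsigned int size;       /* ring size in descriptors      */
	unsigned int in_use;     /* descriptors handed to HW      */
	unsigned int processed;  /* completions reported by HW    */
	unsigned int cleaned;    /* completions already reclaimed */
};

/* Reclaim everything completed so far; returns what is still pending. */
static unsigned int reclaim_completed_sketch(struct txq_sketch *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	/* free_tx_desc() would unmap and free 'reclaim' entries here. */
	q->cleaned += reclaim;
	q->in_use  -= reclaim;
	return q->processed - q->cleaned;   /* 0 in this single-threaded sketch */
}

/* Restart once less than half the ring would be in use; note the check
 * already credits completions that have not been reclaimed yet. */
static int should_restart_sketch(const struct txq_sketch *q)
{
	unsigned int pending = q->processed - q->cleaned;

	return q->in_use - pending < (q->size >> 1);
}

int main(void)
{
	struct txq_sketch q = { .size = 1024, .in_use = 900,
				.processed = 600, .cleaned = 200 };

	printf("restart before reclaim: %d\n", should_restart_sketch(&q));
	reclaim_completed_sketch(&q);
	printf("in_use after reclaim:   %u\n", q.in_use);   /* 500 */
	printf("restart after reclaim:  %d\n", should_restart_sketch(&q));
	return 0;
}
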
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, in clear_rx_desc() argument
355 if (q->use_pages && d->pg_chunk.page) { in clear_rx_desc()
360 q->alloc_size, PCI_DMA_FROMDEVICE); in clear_rx_desc()
366 q->buf_size, PCI_DMA_FROMDEVICE); in clear_rx_desc()
380 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) in free_rx_bufs() argument
382 unsigned int cidx = q->cidx; in free_rx_bufs()
384 while (q->credits--) { in free_rx_bufs()
385 struct rx_sw_desc *d = &q->sdesc[cidx]; in free_rx_bufs()
388 clear_rx_desc(pdev, q, d); in free_rx_bufs()
389 if (++cidx == q->size) in free_rx_bufs()
393 if (q->pg_chunk.page) { in free_rx_bufs()
394 __free_pages(q->pg_chunk.page, q->order); in free_rx_bufs()
395 q->pg_chunk.page = NULL; in free_rx_bufs()
442 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, in alloc_pg_chunk() argument
446 if (!q->pg_chunk.page) { in alloc_pg_chunk()
449 q->pg_chunk.page = alloc_pages(gfp, order); in alloc_pg_chunk()
450 if (unlikely(!q->pg_chunk.page)) in alloc_pg_chunk()
452 q->pg_chunk.va = page_address(q->pg_chunk.page); in alloc_pg_chunk()
453 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - in alloc_pg_chunk()
455 q->pg_chunk.offset = 0; in alloc_pg_chunk()
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, in alloc_pg_chunk()
457 0, q->alloc_size, PCI_DMA_FROMDEVICE); in alloc_pg_chunk()
458 q->pg_chunk.mapping = mapping; in alloc_pg_chunk()
460 sd->pg_chunk = q->pg_chunk; in alloc_pg_chunk()
464 q->pg_chunk.offset += q->buf_size; in alloc_pg_chunk()
465 if (q->pg_chunk.offset == (PAGE_SIZE << order)) in alloc_pg_chunk()
466 q->pg_chunk.page = NULL; in alloc_pg_chunk()
468 q->pg_chunk.va += q->buf_size; in alloc_pg_chunk()
469 get_page(q->pg_chunk.page); in alloc_pg_chunk()
480 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
482 if (q->pend_cred >= q->credits / 4) { in ring_fl_db()
483 q->pend_cred = 0; in ring_fl_db()
485 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
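
ring_fl_db() batches free-list doorbells: newly posted buffer credits accumulate in pend_cred and the doorbell register is only written once a quarter of the posted credits have built up. A sketch under stated assumptions follows; write_doorbell() is a hypothetical stand-in for the t3_write_reg(A_SG_KDOORBELL, ...) call, and the fl struct is simplified.

#include <stdio.h>

struct fl_sketch {
	unsigned int credits;    /* buffers currently posted       */
	unsigned int pend_cred;  /* posted but not yet doorbelled  */
	unsigned int cntxt_id;   /* egress context id of the ring  */
};

static void write_doorbell(unsigned int cntxt_id)
{
	printf("doorbell: context %u\n", cntxt_id);
}

static void ring_fl_db_sketch(struct fl_sketch *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		/* The real driver issues a wmb() here so descriptor
		 * writes are visible before the doorbell. */
		write_doorbell(q->cntxt_id);
	}
}

int main(void)
{
	/* Model topping up an already-full 1024-entry free list. */
	struct fl_sketch fl = { .credits = 1024, .pend_cred = 0, .cntxt_id = 5 };

	for (int i = 0; i < 1024; i++) {
		fl.pend_cred++;            /* refill adds one credit        */
		ring_fl_db_sketch(&fl);    /* rings only every ~256 buffers */
	}
	return 0;
}
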
500 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) in refill_fl() argument
502 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
503 struct rx_desc *d = &q->desc[q->pidx]; in refill_fl()
510 if (q->use_pages) { in refill_fl()
511 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, in refill_fl()
512 q->order))) { in refill_fl()
513 nomem: q->alloc_failed++; in refill_fl()
519 add_one_rx_chunk(mapping, d, q->gen); in refill_fl()
521 q->buf_size - SGE_PG_RSVD, in refill_fl()
526 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); in refill_fl()
532 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, in refill_fl()
533 q->gen, adap->pdev); in refill_fl()
535 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
542 if (++q->pidx == q->size) { in refill_fl()
543 q->pidx = 0; in refill_fl()
544 q->gen ^= 1; in refill_fl()
545 sd = q->sdesc; in refill_fl()
546 d = q->desc; in refill_fl()
551 q->credits += count; in refill_fl()
552 q->pend_cred += count; in refill_fl()
553 ring_fl_db(adap, q); in refill_fl()
573 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, in recycle_rx_buf() argument
576 struct rx_desc *from = &q->desc[idx]; in recycle_rx_buf()
577 struct rx_desc *to = &q->desc[q->pidx]; in recycle_rx_buf()
579 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
583 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); in recycle_rx_buf()
584 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); in recycle_rx_buf()
586 if (++q->pidx == q->size) { in recycle_rx_buf()
587 q->pidx = 0; in recycle_rx_buf()
588 q->gen ^= 1; in recycle_rx_buf()
591 q->credits++; in recycle_rx_buf()
592 q->pend_cred++; in recycle_rx_buf()
593 ring_fl_db(adap, q); in recycle_rx_buf()
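
refill_fl() and recycle_rx_buf() share the same producer-index bookkeeping: every time pidx wraps past the ring size, the generation bit flips, and that bit is what later distinguishes fresh descriptors from stale ones. A minimal sketch of just that wrap/toggle step, with an illustrative ring struct:

#include <stdio.h>

struct ring_sketch {
	unsigned int size;
	unsigned int pidx;   /* producer index              */
	unsigned int gen;    /* generation bit, starts at 1 */
};

static void ring_advance(struct ring_sketch *q)
{
	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;   /* ownership flips on every wrap */
	}
}

int main(void)
{
	struct ring_sketch q = { .size = 4, .pidx = 0, .gen = 1 };

	for (int i = 0; i < 10; i++) {
		ring_advance(&q);
		printf("pidx=%u gen=%u\n", q.pidx, q.gen);
	}
	return 0;
}
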
643 static void t3_reset_qset(struct sge_qset *q) in t3_reset_qset() argument
645 if (q->adap && in t3_reset_qset()
646 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
647 memset(q, 0, sizeof(*q)); in t3_reset_qset()
651 q->adap = NULL; in t3_reset_qset()
652 memset(&q->rspq, 0, sizeof(q->rspq)); in t3_reset_qset()
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
655 q->txq_stopped = 0; in t3_reset_qset()
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ in t3_reset_qset()
657 q->rx_reclaim_timer.function = NULL; in t3_reset_qset()
658 q->nomem = 0; in t3_reset_qset()
659 napi_free_frags(&q->napi); in t3_reset_qset()
672 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) in t3_free_qset() argument
678 if (q->fl[i].desc) { in t3_free_qset()
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
682 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
683 kfree(q->fl[i].sdesc); in t3_free_qset()
685 q->fl[i].size * in t3_free_qset()
686 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
687 q->fl[i].phys_addr); in t3_free_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
704 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
707 if (q->rspq.desc) { in t3_free_qset()
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); in t3_free_qset()
712 q->rspq.size * sizeof(struct rsp_desc), in t3_free_qset()
713 q->rspq.desc, q->rspq.phys_addr); in t3_free_qset()
716 t3_reset_qset(q); in t3_free_qset()
839 struct sge_rspq *q, unsigned int len, in get_packet_pg() argument
847 newskb = skb = q->pg_skb; in get_packet_pg()
863 q->rx_recycle_buf++; in get_packet_pg()
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) in get_packet_pg()
1007 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) in check_ring_tx_db() argument
1010 clear_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1011 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { in check_ring_tx_db()
1012 set_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1014 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1019 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1051 const struct sge_txq *q, in write_wr_hdr_sgl() argument
1058 struct tx_sw_desc *sd = &q->sdesc[pidx]; in write_wr_hdr_sgl()
1098 if (++pidx == q->size) { in write_wr_hdr_sgl()
1101 d = q->desc; in write_wr_hdr_sgl()
1102 sd = q->sdesc; in write_wr_hdr_sgl()
1140 struct sge_txq *q, unsigned int ndesc, in write_tx_pkt_wr() argument
1145 struct tx_desc *d = &q->desc[pidx]; in write_tx_pkt_wr()
1176 q->sdesc[pidx].skb = NULL; in write_tx_pkt_wr()
1189 V_WR_TID(q->token)); in write_tx_pkt_wr()
1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, in write_tx_pkt_wr()
1203 htonl(V_WR_TID(q->token))); in write_tx_pkt_wr()
1207 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
1211 q->stops++; in t3_stop_tx_queue()
1229 struct sge_txq *q; in t3_eth_xmit() local
1242 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1245 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in t3_eth_xmit()
1247 credits = q->size - q->in_use; in t3_eth_xmit()
1251 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1254 dev->name, q->cntxt_id & 7); in t3_eth_xmit()
1258 q->in_use += ndesc; in t3_eth_xmit()
1259 if (unlikely(credits - ndesc < q->stop_thres)) { in t3_eth_xmit()
1260 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1262 if (should_restart_tx(q) && in t3_eth_xmit()
1264 q->restarts++; in t3_eth_xmit()
1269 gen = q->gen; in t3_eth_xmit()
1270 q->unacked += ndesc; in t3_eth_xmit()
1271 compl = (q->unacked & 8) << (S_WR_COMPL - 3); in t3_eth_xmit()
1272 q->unacked &= 7; in t3_eth_xmit()
1273 pidx = q->pidx; in t3_eth_xmit()
1274 q->pidx += ndesc; in t3_eth_xmit()
1275 if (q->pidx >= q->size) { in t3_eth_xmit()
1276 q->pidx -= q->size; in t3_eth_xmit()
1277 q->gen ^= 1; in t3_eth_xmit()
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); in t3_eth_xmit()
1316 check_ring_tx_db(adap, q); in t3_eth_xmit()
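
The unacked/compl arithmetic in t3_eth_xmit() coalesces completion requests: the running count of sent descriptors is kept modulo 8, and bit 3 of that count, shifted into the work-request header, asks the hardware for a completion roughly every eighth work request. A sketch assuming a hypothetical WR_COMPL_BIT position (the real value is S_WR_COMPL from the driver headers):

#include <stdio.h>

#define WR_COMPL_BIT 29   /* hypothetical stand-in for S_WR_COMPL */

struct txq_compl_sketch { unsigned int unacked; };

/* Returns the completion-request flag to OR into the WR header. */
static unsigned int next_compl(struct txq_compl_sketch *q, unsigned int ndesc)
{
	unsigned int compl_flag;

	q->unacked += ndesc;
	/* Bit 3 of the running count becomes the completion request... */
	compl_flag = (q->unacked & 8) << (WR_COMPL_BIT - 3);
	/* ...and the counter is kept modulo 8. */
	q->unacked &= 7;
	return compl_flag;
}

int main(void)
{
	struct txq_compl_sketch q = { 0 };

	for (unsigned int i = 1; i <= 20; i++)
		printf("wr %2u: compl=%#x\n", i, next_compl(&q, 1));
	return 0;
}
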
1370 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, in check_desc_avail() argument
1374 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1375 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1378 if (unlikely(q->size - q->in_use < ndesc)) { in check_desc_avail()
1379 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail()
1384 if (should_restart_tx(q) && in check_desc_avail()
1388 q->stops++; in check_desc_avail()
1402 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1404 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx_imm()
1406 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1407 q->cleaned += reclaim; in reclaim_completed_tx_imm()
1425 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, in ctrl_xmit() argument
1438 wrp->wr_lo = htonl(V_WR_TID(q->token)); in ctrl_xmit()
1440 spin_lock(&q->lock); in ctrl_xmit()
1441 again:reclaim_completed_tx_imm(q); in ctrl_xmit()
1443 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); in ctrl_xmit()
1446 spin_unlock(&q->lock); in ctrl_xmit()
1452 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1454 q->in_use++; in ctrl_xmit()
1455 if (++q->pidx >= q->size) { in ctrl_xmit()
1456 q->pidx = 0; in ctrl_xmit()
1457 q->gen ^= 1; in ctrl_xmit()
1459 spin_unlock(&q->lock); in ctrl_xmit()
1462 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in ctrl_xmit()
1476 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq() local
1478 spin_lock(&q->lock); in restart_ctrlq()
1479 again:reclaim_completed_tx_imm(q); in restart_ctrlq()
1481 while (q->in_use < q->size && in restart_ctrlq()
1482 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1484 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1486 if (++q->pidx >= q->size) { in restart_ctrlq()
1487 q->pidx = 0; in restart_ctrlq()
1488 q->gen ^= 1; in restart_ctrlq()
1490 q->in_use++; in restart_ctrlq()
1493 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1497 if (should_restart_tx(q) && in restart_ctrlq()
1500 q->stops++; in restart_ctrlq()
1503 spin_unlock(&q->lock); in restart_ctrlq()
1506 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_ctrlq()
1579 struct sge_txq *q, unsigned int pidx, in write_ofld_wr() argument
1585 struct tx_desc *d = &q->desc[pidx]; in write_ofld_wr()
1588 q->sdesc[pidx].skb = NULL; in write_ofld_wr()
1610 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, in write_ofld_wr()
1643 static int ofld_xmit(struct adapter *adap, struct sge_txq *q, in ofld_xmit() argument
1649 spin_lock(&q->lock); in ofld_xmit()
1650 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in ofld_xmit()
1652 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); in ofld_xmit()
1656 spin_unlock(&q->lock); in ofld_xmit()
1662 gen = q->gen; in ofld_xmit()
1663 q->in_use += ndesc; in ofld_xmit()
1664 pidx = q->pidx; in ofld_xmit()
1665 q->pidx += ndesc; in ofld_xmit()
1666 if (q->pidx >= q->size) { in ofld_xmit()
1667 q->pidx -= q->size; in ofld_xmit()
1668 q->gen ^= 1; in ofld_xmit()
1670 spin_unlock(&q->lock); in ofld_xmit()
1672 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); in ofld_xmit()
1673 check_ring_tx_db(adap, q); in ofld_xmit()
1687 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq() local
1691 spin_lock(&q->lock); in restart_offloadq()
1692 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in restart_offloadq()
1694 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1698 if (unlikely(q->size - q->in_use < ndesc)) { in restart_offloadq()
1702 if (should_restart_tx(q) && in restart_offloadq()
1705 q->stops++; in restart_offloadq()
1709 gen = q->gen; in restart_offloadq()
1710 q->in_use += ndesc; in restart_offloadq()
1711 pidx = q->pidx; in restart_offloadq()
1712 q->pidx += ndesc; in restart_offloadq()
1713 if (q->pidx >= q->size) { in restart_offloadq()
1714 q->pidx -= q->size; in restart_offloadq()
1715 q->gen ^= 1; in restart_offloadq()
1717 __skb_unlink(skb, &q->sendq); in restart_offloadq()
1718 spin_unlock(&q->lock); in restart_offloadq()
1720 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); in restart_offloadq()
1721 spin_lock(&q->lock); in restart_offloadq()
1723 spin_unlock(&q->lock); in restart_offloadq()
1726 set_bit(TXQ_RUNNING, &q->flags); in restart_offloadq()
1727 set_bit(TXQ_LAST_PKT_DB, &q->flags); in restart_offloadq()
1731 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_offloadq()
1787 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) in offload_enqueue() argument
1789 int was_empty = skb_queue_empty(&q->rx_queue); in offload_enqueue()
1791 __skb_queue_tail(&q->rx_queue, skb); in offload_enqueue()
1794 struct sge_qset *qs = rspq_to_qset(q); in offload_enqueue()
1810 struct sge_rspq *q, in deliver_partial_bundle() argument
1814 q->offload_bundles++; in deliver_partial_bundle()
1833 struct sge_rspq *q = &qs->rspq; in ofld_poll() local
1842 spin_lock_irq(&q->lock); in ofld_poll()
1844 skb_queue_splice_init(&q->rx_queue, &queue); in ofld_poll()
1847 spin_unlock_irq(&q->lock); in ofld_poll()
1850 spin_unlock_irq(&q->lock); in ofld_poll()
1862 q->offload_bundles++; in ofld_poll()
1870 spin_lock_irq(&q->lock); in ofld_poll()
1871 skb_queue_splice(&queue, &q->rx_queue); in ofld_poll()
1872 spin_unlock_irq(&q->lock); in ofld_poll()
1874 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
2220 const struct sge_rspq *q) in is_new_response() argument
2222 return (r->intr_gen & F_RSPD_GEN2) == q->gen; in is_new_response()
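
is_new_response() is the consumer-side half of the generation scheme: a response descriptor is new only if the generation bit the hardware wrote into it matches the generation the driver expects for its current pass over the ring. A sketch with an illustrative mask (F_RSPD_GEN2_BIT below stands in for F_RSPD_GEN2) and simplified structs:

#include <stdint.h>
#include <stdio.h>

#define F_RSPD_GEN2_BIT 0x1u   /* illustrative stand-in for F_RSPD_GEN2 */

struct rsp_desc_sketch { uint32_t intr_gen; };
struct rspq_sketch     { unsigned int gen; };

static int is_new_response_sketch(const struct rsp_desc_sketch *r,
				  const struct rspq_sketch *q)
{
	return (r->intr_gen & F_RSPD_GEN2_BIT) == q->gen;
}

int main(void)
{
	struct rspq_sketch q = { .gen = 1 };
	struct rsp_desc_sketch fresh = { .intr_gen = 0x1 };  /* HW wrote this pass  */
	struct rsp_desc_sketch stale = { .intr_gen = 0x0 };  /* left from last pass */

	printf("fresh: %d, stale: %d\n",
	       is_new_response_sketch(&fresh, &q),
	       is_new_response_sketch(&stale, &q));
	return 0;
}
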
2225 static inline void clear_rspq_bufstate(struct sge_rspq * const q) in clear_rspq_bufstate() argument
2227 q->pg_skb = NULL; in clear_rspq_bufstate()
2228 q->rx_recycle_buf = 0; in clear_rspq_bufstate()
2258 struct sge_rspq *q = &qs->rspq; in process_responses() local
2259 struct rsp_desc *r = &q->desc[q->cidx]; in process_responses()
2265 q->next_holdoff = q->holdoff_tmr; in process_responses()
2267 while (likely(budget_left && is_new_response(r, q))) { in process_responses()
2288 q->async_notif++; in process_responses()
2293 q->next_holdoff = NOMEM_INTR_DELAY; in process_responses()
2294 q->nomem++; in process_responses()
2299 q->imm_data++; in process_responses()
2322 skb = get_packet_pg(adap, fl, q, in process_responses()
2326 q->pg_skb = skb; in process_responses()
2333 q->rx_drops++; in process_responses()
2340 q->pure_rsps++; in process_responses()
2348 if (unlikely(++q->cidx == q->size)) { in process_responses()
2349 q->cidx = 0; in process_responses()
2350 q->gen ^= 1; in process_responses()
2351 r = q->desc; in process_responses()
2355 if (++q->credits >= (q->size / 4)) { in process_responses()
2356 refill_rspq(adap, q, q->credits); in process_responses()
2357 q->credits = 0; in process_responses()
2366 rx_eth(adap, q, skb, ethpad, lro); in process_responses()
2368 q->offload_pkts++; in process_responses()
2372 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2378 clear_rspq_bufstate(q); in process_responses()
2383 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2465 struct sge_rspq *q = &qs->rspq; in process_pure_responses() local
2472 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
2473 q->cidx = 0; in process_pure_responses()
2474 q->gen ^= 1; in process_pure_responses()
2475 r = q->desc; in process_pure_responses()
2484 q->pure_rsps++; in process_pure_responses()
2485 if (++q->credits >= (q->size / 4)) { in process_pure_responses()
2486 refill_rspq(adap, q, q->credits); in process_pure_responses()
2487 q->credits = 0; in process_pure_responses()
2489 if (!is_new_response(r, q)) in process_pure_responses()
2501 return is_new_response(r, q); in process_pure_responses()
2519 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) in handle_responses() argument
2521 struct sge_qset *qs = rspq_to_qset(q); in handle_responses()
2522 struct rsp_desc *r = &q->desc[q->cidx]; in handle_responses()
2524 if (!is_new_response(r, q)) in handle_responses()
2528 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2529 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); in handle_responses()
2544 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix() local
2546 spin_lock(&q->lock); in t3_sge_intr_msix()
2548 q->unhandled_irqs++; in t3_sge_intr_msix()
2549 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2550 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_sge_intr_msix()
2551 spin_unlock(&q->lock); in t3_sge_intr_msix()
2562 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi() local
2564 spin_lock(&q->lock); in t3_sge_intr_msix_napi()
2566 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2567 q->unhandled_irqs++; in t3_sge_intr_msix_napi()
2568 spin_unlock(&q->lock); in t3_sge_intr_msix_napi()
2582 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi() local
2584 spin_lock(&q->lock); in t3_intr_msi()
2587 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2588 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_intr_msi()
2603 q->unhandled_irqs++; in t3_intr_msi()
2605 spin_unlock(&q->lock); in t3_intr_msi()
2611 struct sge_rspq *q = &qs->rspq; in rspq_check_napi() local
2614 is_new_response(&q->desc[q->cidx], q)) { in rspq_check_napi()
2632 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi() local
2634 spin_lock(&q->lock); in t3_intr_msi_napi()
2640 q->unhandled_irqs++; in t3_intr_msi_napi()
2642 spin_unlock(&q->lock); in t3_intr_msi_napi()
2976 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset() local
2978 init_qset_cntxt(q, id); in t3_sge_alloc_qset()
2979 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); in t3_sge_alloc_qset()
2980 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); in t3_sge_alloc_qset()
2982 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, in t3_sge_alloc_qset()
2985 &q->fl[0].phys_addr, &q->fl[0].sdesc); in t3_sge_alloc_qset()
2986 if (!q->fl[0].desc) in t3_sge_alloc_qset()
2989 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, in t3_sge_alloc_qset()
2992 &q->fl[1].phys_addr, &q->fl[1].sdesc); in t3_sge_alloc_qset()
2993 if (!q->fl[1].desc) in t3_sge_alloc_qset()
2996 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, in t3_sge_alloc_qset()
2998 &q->rspq.phys_addr, NULL); in t3_sge_alloc_qset()
2999 if (!q->rspq.desc) in t3_sge_alloc_qset()
3009 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], in t3_sge_alloc_qset()
3011 &q->txq[i].phys_addr, in t3_sge_alloc_qset()
3012 &q->txq[i].sdesc); in t3_sge_alloc_qset()
3013 if (!q->txq[i].desc) in t3_sge_alloc_qset()
3016 q->txq[i].gen = 1; in t3_sge_alloc_qset()
3017 q->txq[i].size = p->txq_size[i]; in t3_sge_alloc_qset()
3018 spin_lock_init(&q->txq[i].lock); in t3_sge_alloc_qset()
3019 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
3022 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, in t3_sge_alloc_qset()
3023 (unsigned long)q); in t3_sge_alloc_qset()
3024 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, in t3_sge_alloc_qset()
3025 (unsigned long)q); in t3_sge_alloc_qset()
3027 q->fl[0].gen = q->fl[1].gen = 1; in t3_sge_alloc_qset()
3028 q->fl[0].size = p->fl_size; in t3_sge_alloc_qset()
3029 q->fl[1].size = p->jumbo_size; in t3_sge_alloc_qset()
3031 q->rspq.gen = 1; in t3_sge_alloc_qset()
3032 q->rspq.size = p->rspq_size; in t3_sge_alloc_qset()
3033 spin_lock_init(&q->rspq.lock); in t3_sge_alloc_qset()
3034 skb_queue_head_init(&q->rspq.rx_queue); in t3_sge_alloc_qset()
3036 q->txq[TXQ_ETH].stop_thres = nports * in t3_sge_alloc_qset()
3040 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3042 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); in t3_sge_alloc_qset()
3045 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3047 q->fl[1].buf_size = is_offload(adapter) ? in t3_sge_alloc_qset()
3052 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3053 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3054 q->fl[0].order = FL0_PG_ORDER; in t3_sge_alloc_qset()
3055 q->fl[1].order = FL1_PG_ORDER; in t3_sge_alloc_qset()
3056 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3057 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3062 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, in t3_sge_alloc_qset()
3063 q->rspq.phys_addr, q->rspq.size, in t3_sge_alloc_qset()
3064 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); in t3_sge_alloc_qset()
3069 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, in t3_sge_alloc_qset()
3070 q->fl[i].phys_addr, q->fl[i].size, in t3_sge_alloc_qset()
3071 q->fl[i].buf_size - SGE_PG_RSVD, in t3_sge_alloc_qset()
3077 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, in t3_sge_alloc_qset()
3078 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, in t3_sge_alloc_qset()
3079 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, in t3_sge_alloc_qset()
3085 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, in t3_sge_alloc_qset()
3087 q->txq[TXQ_OFLD].phys_addr, in t3_sge_alloc_qset()
3088 q->txq[TXQ_OFLD].size, 0, 1, 0); in t3_sge_alloc_qset()
3094 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, in t3_sge_alloc_qset()
3096 q->txq[TXQ_CTRL].phys_addr, in t3_sge_alloc_qset()
3097 q->txq[TXQ_CTRL].size, in t3_sge_alloc_qset()
3098 q->txq[TXQ_CTRL].token, 1, 0); in t3_sge_alloc_qset()
3105 q->adap = adapter; in t3_sge_alloc_qset()
3106 q->netdev = dev; in t3_sge_alloc_qset()
3107 q->tx_q = netdevq; in t3_sge_alloc_qset()
3108 t3_update_qset_coalesce(q, p); in t3_sge_alloc_qset()
3110 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, in t3_sge_alloc_qset()
3116 if (avail < q->fl[0].size) in t3_sge_alloc_qset()
3120 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, in t3_sge_alloc_qset()
3122 if (avail < q->fl[1].size) in t3_sge_alloc_qset()
3125 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); in t3_sge_alloc_qset()
3127 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | in t3_sge_alloc_qset()
3128 V_NEWTIMER(q->rspq.holdoff_tmr)); in t3_sge_alloc_qset()
3135 t3_free_qset(adapter, q); in t3_sge_alloc_qset()
3150 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers() local
3152 if (q->tx_reclaim_timer.function) in t3_start_sge_timers()
3153 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in t3_start_sge_timers()
3155 if (q->rx_reclaim_timer.function) in t3_start_sge_timers()
3156 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); in t3_start_sge_timers()
3171 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers() local
3173 if (q->tx_reclaim_timer.function) in t3_stop_sge_timers()
3174 del_timer_sync(&q->tx_reclaim_timer); in t3_stop_sge_timers()
3175 if (q->rx_reclaim_timer.function) in t3_stop_sge_timers()
3176 del_timer_sync(&q->rx_reclaim_timer); in t3_stop_sge_timers()
3291 struct qset_params *q = p->qset + i; in t3_sge_prep() local
3293 q->polling = adap->params.rev > 0; in t3_sge_prep()
3294 q->coalesce_usecs = 5; in t3_sge_prep()
3295 q->rspq_size = 1024; in t3_sge_prep()
3296 q->fl_size = 1024; in t3_sge_prep()
3297 q->jumbo_size = 512; in t3_sge_prep()
3298 q->txq_size[TXQ_ETH] = 1024; in t3_sge_prep()
3299 q->txq_size[TXQ_OFLD] = 1024; in t3_sge_prep()
3300 q->txq_size[TXQ_CTRL] = 256; in t3_sge_prep()
3301 q->cong_thres = 0; in t3_sge_prep()