Lines Matching refs:sge

254 struct sge {  struct
282 static void tx_sched_stop(struct sge *sge) in tx_sched_stop() argument
284 struct sched *s = sge->tx_sched; in tx_sched_stop()
297 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, in t1_sched_update_parms() argument
300 struct sched *s = sge->tx_sched; in t1_sched_update_parms()
320 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { in t1_sched_update_parms()
343 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
345 struct sched *s = sge->tx_sched;
350 t1_sched_update_parms(sge, i, 0, 0);
357 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
360 struct sched *s = sge->tx_sched;
363 t1_sched_update_parms(sge, port, 0, 0);
371 static int tx_sched_init(struct sge *sge) in tx_sched_init() argument
381 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); in tx_sched_init()
382 sge->tx_sched = s; in tx_sched_init()
386 t1_sched_update_parms(sge, i, 1500, 1000); in tx_sched_init()
397 static inline int sched_update_avail(struct sge *sge) in sched_update_avail() argument
399 struct sched *s = sge->tx_sched; in sched_update_avail()
431 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, in sched_skb() argument
434 struct sched *s = sge->tx_sched; in sched_skb()
472 if (update-- && sched_update_avail(sge)) in sched_skb()
480 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb()
484 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); in sched_skb()
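The block of references above belongs to the Tx packet scheduler (tx_sched_stop, t1_sched_update_parms, t1_sched_set_max_avail_bytes, t1_sched_set_drain_bits_per_us, tx_sched_init, sched_update_avail, sched_skb). As a minimal sketch only: tx_sched_init() is shown seeding every port via t1_sched_update_parms(sge, i, 1500, 1000), so the helper below re-applies those defaults. Treating the third and fourth arguments as MTU and link speed is an assumption read off that call, and sched_reset_defaults itself is a hypothetical illustration, not a function from the driver.

        /*
         * Hedged sketch: re-apply the per-port scheduler defaults that
         * tx_sched_init() appears to install.  The (1500, 1000) arguments
         * are assumed to be MTU and link speed, based only on the call
         * visible in the listing; nports comes from the adapter params
         * referenced elsewhere in this file.
         */
        static void sched_reset_defaults(struct sge *sge)
        {
                unsigned int i;

                for (i = 0; i < sge->adapter->params.nports; i++)
                        t1_sched_update_parms(sge, i, 1500, 1000);
        }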
525 static void free_rx_resources(struct sge *sge) in free_rx_resources() argument
527 struct pci_dev *pdev = sge->adapter->pdev; in free_rx_resources()
530 if (sge->respQ.entries) { in free_rx_resources()
531 size = sizeof(struct respQ_e) * sge->respQ.size; in free_rx_resources()
532 pci_free_consistent(pdev, size, sge->respQ.entries, in free_rx_resources()
533 sge->respQ.dma_addr); in free_rx_resources()
537 struct freelQ *q = &sge->freelQ[i]; in free_rx_resources()
555 static int alloc_rx_resources(struct sge *sge, struct sge_params *p) in alloc_rx_resources() argument
557 struct pci_dev *pdev = sge->adapter->pdev; in alloc_rx_resources()
561 struct freelQ *q = &sge->freelQ[i]; in alloc_rx_resources()
565 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; in alloc_rx_resources()
584 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + in alloc_rx_resources()
586 sge->freelQ[!sge->jumbo_fl].dma_offset; in alloc_rx_resources()
591 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; in alloc_rx_resources()
597 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; in alloc_rx_resources()
598 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; in alloc_rx_resources()
600 sge->respQ.genbit = 1; in alloc_rx_resources()
601 sge->respQ.size = SGE_RESPQ_E_N; in alloc_rx_resources()
602 sge->respQ.credits = 0; in alloc_rx_resources()
603 size = sizeof(struct respQ_e) * sge->respQ.size; in alloc_rx_resources()
604 sge->respQ.entries = in alloc_rx_resources()
605 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); in alloc_rx_resources()
606 if (!sge->respQ.entries) in alloc_rx_resources()
611 free_rx_resources(sge); in alloc_rx_resources()
618 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) in free_cmdQ_buffers() argument
621 struct pci_dev *pdev = sge->adapter->pdev; in free_cmdQ_buffers()
652 static void free_tx_resources(struct sge *sge) in free_tx_resources() argument
654 struct pci_dev *pdev = sge->adapter->pdev; in free_tx_resources()
658 struct cmdQ *q = &sge->cmdQ[i]; in free_tx_resources()
662 free_cmdQ_buffers(sge, q, q->in_use); in free_tx_resources()
676 static int alloc_tx_resources(struct sge *sge, struct sge_params *p) in alloc_tx_resources() argument
678 struct pci_dev *pdev = sge->adapter->pdev; in alloc_tx_resources()
682 struct cmdQ *q = &sge->cmdQ[i]; in alloc_tx_resources()
710 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * in alloc_tx_resources()
715 free_tx_resources(sge); in alloc_tx_resources()
733 struct sge *sge = adapter->sge; in t1_vlan_mode() local
736 sge->sge_control |= F_VLAN_XTRACT; in t1_vlan_mode()
738 sge->sge_control &= ~F_VLAN_XTRACT; in t1_vlan_mode()
740 writel(sge->sge_control, adapter->regs + A_SG_CONTROL); in t1_vlan_mode()
749 static void configure_sge(struct sge *sge, struct sge_params *p) in configure_sge() argument
751 struct adapter *ap = sge->adapter; in configure_sge()
754 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, in configure_sge()
756 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, in configure_sge()
758 setup_ring_params(ap, sge->freelQ[0].dma_addr, in configure_sge()
759 sge->freelQ[0].size, A_SG_FL0BASELWR, in configure_sge()
761 setup_ring_params(ap, sge->freelQ[1].dma_addr, in configure_sge()
762 sge->freelQ[1].size, A_SG_FL1BASELWR, in configure_sge()
768 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, in configure_sge()
770 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); in configure_sge()
772 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | in configure_sge()
775 V_RX_PKT_OFFSET(sge->rx_pkt_pad); in configure_sge()
778 sge->sge_control |= F_ENABLE_BIG_ENDIAN; in configure_sge()
782 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); in configure_sge()
784 t1_sge_set_coalesce_params(sge, p); in configure_sge()
790 static inline unsigned int jumbo_payload_capacity(const struct sge *sge) in jumbo_payload_capacity() argument
792 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - in jumbo_payload_capacity()
793 sge->freelQ[sge->jumbo_fl].dma_offset - in jumbo_payload_capacity()
800 void t1_sge_destroy(struct sge *sge) in t1_sge_destroy() argument
804 for_each_port(sge->adapter, i) in t1_sge_destroy()
805 free_percpu(sge->port_stats[i]); in t1_sge_destroy()
807 kfree(sge->tx_sched); in t1_sge_destroy()
808 free_tx_resources(sge); in t1_sge_destroy()
809 free_rx_resources(sge); in t1_sge_destroy()
810 kfree(sge); in t1_sge_destroy()
825 static void refill_free_list(struct sge *sge, struct freelQ *q) in refill_free_list() argument
827 struct pci_dev *pdev = sge->adapter->pdev; in refill_free_list()
843 skb_reserve(skb, sge->rx_pkt_pad); in refill_free_list()
871 static void freelQs_empty(struct sge *sge) in freelQs_empty() argument
873 struct adapter *adapter = sge->adapter; in freelQs_empty()
877 refill_free_list(sge, &sge->freelQ[0]); in freelQs_empty()
878 refill_free_list(sge, &sge->freelQ[1]); in freelQs_empty()
880 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && in freelQs_empty()
881 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { in freelQs_empty()
883 irqholdoff_reg = sge->fixed_intrtimer; in freelQs_empty()
887 irqholdoff_reg = sge->intrtimer_nres; in freelQs_empty()
904 void t1_sge_intr_disable(struct sge *sge) in t1_sge_intr_disable() argument
906 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_disable()
908 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_disable()
909 writel(0, sge->adapter->regs + A_SG_INT_ENABLE); in t1_sge_intr_disable()
915 void t1_sge_intr_enable(struct sge *sge) in t1_sge_intr_enable() argument
918 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_enable()
920 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) in t1_sge_intr_enable()
922 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); in t1_sge_intr_enable()
923 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_enable()
929 void t1_sge_intr_clear(struct sge *sge) in t1_sge_intr_clear() argument
931 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); in t1_sge_intr_clear()
932 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); in t1_sge_intr_clear()
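t1_sge_intr_disable(), t1_sge_intr_enable() and t1_sge_intr_clear() above mask, unmask and acknowledge the SGE sources in A_PL_ENABLE, A_SG_INT_ENABLE and A_SG_INT_CAUSE. The short sketch below shows one plausible way a caller could sequence them around a reconfiguration; the disable/clear/enable ordering and the sge_intr_reinit wrapper are assumptions for illustration, and only the three helpers themselves come from the listing.

        /*
         * Hedged sketch of a masking sequence (assumed order, not taken
         * from the driver): quiesce SGE interrupts, drop any latched
         * cause bits, then unmask again.
         */
        static void sge_intr_reinit(struct sge *sge)
        {
                t1_sge_intr_disable(sge);   /* clear SGE bits in PL/SG enables */
                t1_sge_intr_clear(sge);     /* ack A_PL_CAUSE / A_SG_INT_CAUSE */
                t1_sge_intr_enable(sge);    /* restore the enables */
        }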
938 int t1_sge_intr_error_handler(struct sge *sge) in t1_sge_intr_error_handler() argument
940 struct adapter *adapter = sge->adapter; in t1_sge_intr_error_handler()
946 sge->stats.respQ_empty++; in t1_sge_intr_error_handler()
948 sge->stats.respQ_overflow++; in t1_sge_intr_error_handler()
953 sge->stats.freelistQ_empty++; in t1_sge_intr_error_handler()
954 freelQs_empty(sge); in t1_sge_intr_error_handler()
957 sge->stats.pkt_too_big++; in t1_sge_intr_error_handler()
962 sge->stats.pkt_mismatch++; in t1_sge_intr_error_handler()
972 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) in t1_sge_get_intr_counts() argument
974 return &sge->stats; in t1_sge_get_intr_counts()
977 void t1_sge_get_port_stats(const struct sge *sge, int port, in t1_sge_get_port_stats() argument
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); in t1_sge_get_port_stats()
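t1_sge_get_port_stats() above walks per_cpu_ptr(sge->port_stats[port], cpu), i.e. the per-port statistics are kept per CPU and summed on demand. The fragment below sketches that aggregation shape; the rx_frames field and the for_each_possible_cpu() loop are illustrative assumptions, since neither struct sge_port_stats nor the loop header is visible in this listing.

        /*
         * Hedged sketch of the per-CPU aggregation implied above.
         * "rx_frames" is a hypothetical counter name used only for
         * illustration.
         */
        static u64 sge_sum_rx_frames(const struct sge *sge, int port)
        {
                u64 total = 0;
                int cpu;

                for_each_possible_cpu(cpu) {
                        const struct sge_port_stats *st =
                                per_cpu_ptr(sge->port_stats[port], cpu);

                        total += st->rx_frames;    /* hypothetical field */
                }
                return total;
        }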
1289 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) in reclaim_completed_tx() argument
1296 free_cmdQ_buffers(sge, q, reclaim); in reclaim_completed_tx()
1307 struct sge *sge = (struct sge *) arg; in restart_sched() local
1308 struct adapter *adapter = sge->adapter; in restart_sched()
1309 struct cmdQ *q = &sge->cmdQ[0]; in restart_sched()
1314 reclaim_completed_tx(sge, q); in restart_sched()
1318 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { in restart_sched()
1353 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) in sge_rx() argument
1357 struct adapter *adapter = sge->adapter; in sge_rx()
1361 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad); in sge_rx()
1363 sge->stats.rx_drops++; in sge_rx()
1374 st = this_cpu_ptr(sge->port_stats[p->iff]); in sge_rx()
1408 static void restart_tx_queues(struct sge *sge) in restart_tx_queues() argument
1410 struct adapter *adap = sge->adapter; in restart_tx_queues()
1413 if (!enough_free_Tx_descs(&sge->cmdQ[0])) in restart_tx_queues()
1419 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && in restart_tx_queues()
1421 sge->stats.cmdQ_restarted[2]++; in restart_tx_queues()
1435 struct sge *sge = adapter->sge; in update_tx_info() local
1436 struct cmdQ *cmdq = &sge->cmdQ[0]; in update_tx_info()
1440 freelQs_empty(sge); in update_tx_info()
1451 if (sge->tx_sched) in update_tx_info()
1452 tasklet_hi_schedule(&sge->tx_sched->sched_tsk); in update_tx_info()
1457 if (unlikely(sge->stopped_tx_queues != 0)) in update_tx_info()
1458 restart_tx_queues(sge); in update_tx_info()
1469 struct sge *sge = adapter->sge; in process_responses() local
1470 struct respQ *q = &sge->respQ; in process_responses()
1492 sge->cmdQ[1].processed += cmdq_processed[1]; in process_responses()
1497 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; in process_responses()
1503 sge_rx(sge, fl, e->BufferLength); in process_responses()
1517 refill_free_list(sge, fl); in process_responses()
1519 sge->stats.pure_rsps++; in process_responses()
1536 sge->cmdQ[1].processed += cmdq_processed[1]; in process_responses()
1543 const struct respQ *Q = &adapter->sge->respQ; in responses_pending()
1559 struct sge *sge = adapter->sge; in process_pure_responses() local
1560 struct respQ *q = &sge->respQ; in process_pure_responses()
1562 const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; in process_pure_responses()
1588 sge->stats.pure_rsps++; in process_pure_responses()
1592 sge->cmdQ[1].processed += cmdq_processed[1]; in process_pure_responses()
1609 writel(adapter->sge->respQ.cidx, in t1_poll()
1618 struct sge *sge = adapter->sge; in t1_interrupt() local
1629 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); in t1_interrupt()
1642 sge->stats.unhandled_irqs++; in t1_interrupt()
1663 struct sge *sge = adapter->sge; in t1_sge_tx() local
1664 struct cmdQ *q = &sge->cmdQ[qid]; in t1_sge_tx()
1670 reclaim_completed_tx(sge, q); in t1_sge_tx()
1681 set_bit(dev->if_port, &sge->stopped_tx_queues); in t1_sge_tx()
1682 sge->stats.cmdQ_full[2]++; in t1_sge_tx()
1692 set_bit(dev->if_port, &sge->stopped_tx_queues); in t1_sge_tx()
1693 sge->stats.cmdQ_full[2]++; in t1_sge_tx()
1699 if (sge->tx_sched && !qid && skb->dev) { in t1_sge_tx()
1705 skb = sched_skb(sge, skb, credits); in t1_sge_tx()
1775 struct sge *sge = adapter->sge; in t1_start_xmit() local
1776 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); in t1_start_xmit()
1841 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { in t1_start_xmit()
1844 adapter->sge->espibug_skb[dev->if_port] = skb; in t1_start_xmit()
1889 struct sge *sge = (struct sge *)data; in sge_tx_reclaim_cb() local
1892 struct cmdQ *q = &sge->cmdQ[i]; in sge_tx_reclaim_cb()
1897 reclaim_completed_tx(sge, q); in sge_tx_reclaim_cb()
1899 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); in sge_tx_reclaim_cb()
1903 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in sge_tx_reclaim_cb()
1909 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) in t1_sge_set_coalesce_params() argument
1911 sge->fixed_intrtimer = p->rx_coalesce_usecs * in t1_sge_set_coalesce_params()
1912 core_ticks_per_usec(sge->adapter); in t1_sge_set_coalesce_params()
1913 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); in t1_sge_set_coalesce_params()
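t1_sge_set_coalesce_params() converts p->rx_coalesce_usecs into core clock ticks and programs A_SG_INTRTIMER. The wrapper below is a hedged sketch of how a caller might apply a new holdoff value; sge_set_rx_holdoff is a hypothetical helper, and the only assumption is that the caller updates the same struct sge_params it passes in.

        /*
         * Hedged sketch: record a new Rx interrupt holdoff (in microseconds)
         * and let the listed helper reprogram A_SG_INTRTIMER.
         */
        static int sge_set_rx_holdoff(struct sge *sge, struct sge_params *p,
                                      unsigned int usecs)
        {
                p->rx_coalesce_usecs = usecs;   /* field used by the helper above */
                return t1_sge_set_coalesce_params(sge, p);
        }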
1921 int t1_sge_configure(struct sge *sge, struct sge_params *p) in t1_sge_configure() argument
1923 if (alloc_rx_resources(sge, p)) in t1_sge_configure()
1925 if (alloc_tx_resources(sge, p)) { in t1_sge_configure()
1926 free_rx_resources(sge); in t1_sge_configure()
1929 configure_sge(sge, p); in t1_sge_configure()
1937 p->large_buf_capacity = jumbo_payload_capacity(sge); in t1_sge_configure()
1944 void t1_sge_stop(struct sge *sge) in t1_sge_stop() argument
1947 writel(0, sge->adapter->regs + A_SG_CONTROL); in t1_sge_stop()
1948 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ in t1_sge_stop()
1950 if (is_T2(sge->adapter)) in t1_sge_stop()
1951 del_timer_sync(&sge->espibug_timer); in t1_sge_stop()
1953 del_timer_sync(&sge->tx_reclaim_timer); in t1_sge_stop()
1954 if (sge->tx_sched) in t1_sge_stop()
1955 tx_sched_stop(sge); in t1_sge_stop()
1958 kfree_skb(sge->espibug_skb[i]); in t1_sge_stop()
1964 void t1_sge_start(struct sge *sge) in t1_sge_start() argument
1966 refill_free_list(sge, &sge->freelQ[0]); in t1_sge_start()
1967 refill_free_list(sge, &sge->freelQ[1]); in t1_sge_start()
1969 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); in t1_sge_start()
1970 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); in t1_sge_start()
1971 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ in t1_sge_start()
1973 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in t1_sge_start()
1975 if (is_T2(sge->adapter)) in t1_sge_start()
1976 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in t1_sge_start()
1985 struct sge *sge = adapter->sge; in espibug_workaround_t204() local
1996 struct sk_buff *skb = sge->espibug_skb[i]; in espibug_workaround_t204()
2022 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in espibug_workaround_t204()
2028 struct sge *sge = adapter->sge; in espibug_workaround() local
2031 struct sk_buff *skb = sge->espibug_skb[0]; in espibug_workaround()
2054 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in espibug_workaround()
2060 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) in t1_sge_create()
2062 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); in t1_sge_create() local
2065 if (!sge) in t1_sge_create()
2068 sge->adapter = adapter; in t1_sge_create()
2069 sge->netdev = adapter->port[0].dev; in t1_sge_create()
2070 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; in t1_sge_create()
2071 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; in t1_sge_create()
2074 sge->port_stats[i] = alloc_percpu(struct sge_port_stats); in t1_sge_create()
2075 if (!sge->port_stats[i]) in t1_sge_create()
2079 init_timer(&sge->tx_reclaim_timer); in t1_sge_create()
2080 sge->tx_reclaim_timer.data = (unsigned long)sge; in t1_sge_create()
2081 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; in t1_sge_create()
2083 if (is_T2(sge->adapter)) { in t1_sge_create()
2084 init_timer(&sge->espibug_timer); in t1_sge_create()
2087 tx_sched_init(sge); in t1_sge_create()
2088 sge->espibug_timer.function = espibug_workaround_t204; in t1_sge_create()
2090 sge->espibug_timer.function = espibug_workaround; in t1_sge_create()
2091 sge->espibug_timer.data = (unsigned long)sge->adapter; in t1_sge_create()
2093 sge->espibug_timeout = 1; in t1_sge_create()
2096 sge->espibug_timeout = HZ/100; in t1_sge_create()
2102 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; in t1_sge_create()
2103 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; in t1_sge_create()
2104 if (sge->tx_sched) { in t1_sge_create()
2105 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) in t1_sge_create()
2115 return sge; in t1_sge_create()
2118 free_percpu(sge->port_stats[i]); in t1_sge_create()
2121 kfree(sge); in t1_sge_create()
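Taken together, the entry points in this listing (t1_sge_create(), t1_sge_configure(), t1_sge_start(), t1_sge_stop() and t1_sge_destroy()) describe the SGE lifecycle. The sketch below strings them together in the order their names suggest; the bring-up/tear-down wrappers, the error handling and the assumption that t1_sge_destroy() is safe after a failed configure are all illustrative, and only the five signatures are taken from the listing.

        /*
         * Hedged lifecycle sketch (assumed create -> configure -> start,
         * then stop -> destroy).  The wrapper functions are hypothetical.
         */
        static int sge_bringup_example(struct adapter *adapter, struct sge_params *p)
        {
                struct sge *sge = t1_sge_create(adapter, p);

                if (!sge)
                        return -ENOMEM;

                if (t1_sge_configure(sge, p)) {     /* allocates Rx/Tx rings, programs registers */
                        t1_sge_destroy(sge);        /* assumed safe on a partly set-up sge */
                        return -ENOMEM;
                }

                adapter->sge = sge;
                t1_sge_start(sge);                  /* fill free lists, write SG_CONTROL, arm timers */
                return 0;
        }

        static void sge_teardown_example(struct adapter *adapter)
        {
                t1_sge_stop(adapter->sge);          /* quiesce, stop timers and Tx scheduler */
                t1_sge_destroy(adapter->sge);       /* free rings, per-CPU stats and the sge itself */
        }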