Lines matching references to the identifier ss (struct myri10ge_slice_state *) in the myri10ge driver. Each entry shows the driver source line number, the code on that line, and the function containing the reference (annotated as argument, local, or member).
213 struct myri10ge_slice_state *ss; member
929 static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) in myri10ge_ss_init_lock() argument
931 spin_lock_init(&ss->lock); in myri10ge_ss_init_lock()
932 ss->state = SLICE_STATE_IDLE; in myri10ge_ss_init_lock()
935 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) in myri10ge_ss_lock_napi() argument
938 spin_lock(&ss->lock); in myri10ge_ss_lock_napi()
939 if ((ss->state & SLICE_LOCKED)) { in myri10ge_ss_lock_napi()
940 WARN_ON((ss->state & SLICE_STATE_NAPI)); in myri10ge_ss_lock_napi()
941 ss->state |= SLICE_STATE_NAPI_YIELD; in myri10ge_ss_lock_napi()
943 ss->lock_napi_yield++; in myri10ge_ss_lock_napi()
945 ss->state = SLICE_STATE_NAPI; in myri10ge_ss_lock_napi()
946 spin_unlock(&ss->lock); in myri10ge_ss_lock_napi()
950 static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) in myri10ge_ss_unlock_napi() argument
952 spin_lock(&ss->lock); in myri10ge_ss_unlock_napi()
953 WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); in myri10ge_ss_unlock_napi()
954 ss->state = SLICE_STATE_IDLE; in myri10ge_ss_unlock_napi()
955 spin_unlock(&ss->lock); in myri10ge_ss_unlock_napi()
958 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) in myri10ge_ss_lock_poll() argument
961 spin_lock_bh(&ss->lock); in myri10ge_ss_lock_poll()
962 if ((ss->state & SLICE_LOCKED)) { in myri10ge_ss_lock_poll()
963 ss->state |= SLICE_STATE_POLL_YIELD; in myri10ge_ss_lock_poll()
965 ss->lock_poll_yield++; in myri10ge_ss_lock_poll()
967 ss->state |= SLICE_STATE_POLL; in myri10ge_ss_lock_poll()
968 spin_unlock_bh(&ss->lock); in myri10ge_ss_lock_poll()
972 static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) in myri10ge_ss_unlock_poll() argument
974 spin_lock_bh(&ss->lock); in myri10ge_ss_unlock_poll()
975 WARN_ON((ss->state & SLICE_STATE_NAPI)); in myri10ge_ss_unlock_poll()
976 ss->state = SLICE_STATE_IDLE; in myri10ge_ss_unlock_poll()
977 spin_unlock_bh(&ss->lock); in myri10ge_ss_unlock_poll()
980 static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) in myri10ge_ss_busy_polling() argument
982 WARN_ON(!(ss->state & SLICE_LOCKED)); in myri10ge_ss_busy_polling()
983 return (ss->state & SLICE_USER_PEND); in myri10ge_ss_busy_polling()
986 static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) in myri10ge_ss_init_lock() argument
990 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) in myri10ge_ss_lock_napi() argument
995 static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) in myri10ge_ss_unlock_napi() argument
999 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) in myri10ge_ss_lock_poll() argument
1004 static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) in myri10ge_ss_unlock_poll() argument
1008 static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) in myri10ge_ss_busy_polling() argument
1017 struct myri10ge_slice_state *ss; in myri10ge_reset() local
1045 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry); in myri10ge_reset()
1102 ss = &mgp->ss[i]; in myri10ge_reset()
1103 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus); in myri10ge_reset()
1104 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus); in myri10ge_reset()
1113 ss = &mgp->ss[i]; in myri10ge_reset()
1114 ss->irq_claim = in myri10ge_reset()
1134 ss = &mgp->ss[i]; in myri10ge_reset()
1136 ss->dca_tag = (__iomem __be32 *) in myri10ge_reset()
1139 ss->dca_tag = NULL; in myri10ge_reset()
1148 ss = &mgp->ss[i]; in myri10ge_reset()
1150 memset(ss->rx_done.entry, 0, bytes); in myri10ge_reset()
1151 ss->tx.req = 0; in myri10ge_reset()
1152 ss->tx.done = 0; in myri10ge_reset()
1153 ss->tx.pkt_start = 0; in myri10ge_reset()
1154 ss->tx.pkt_done = 0; in myri10ge_reset()
1155 ss->rx_big.cnt = 0; in myri10ge_reset()
1156 ss->rx_small.cnt = 0; in myri10ge_reset()
1157 ss->rx_done.idx = 0; in myri10ge_reset()
1158 ss->rx_done.cnt = 0; in myri10ge_reset()
1159 ss->tx.wake_queue = 0; in myri10ge_reset()
1160 ss->tx.stop_queue = 0; in myri10ge_reset()
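Lines 1102-1160 show the per-slice work done by myri10ge_reset(): each slice's receive-completion ring is handed to the firmware, the per-slice interrupt-claim and DCA registers are looked up, and the host-side ring state is cleared. A sketch of the final clearing loop reconstructed from the fragments; the mgp->num_slices loop bound is not shown in the listing and is assumed, and bytes is the completion-ring size computed at line 1045.

for (i = 0; i < mgp->num_slices; i++) {
	ss = &mgp->ss[i];

	/* drop any stale completions and restart every counter from zero */
	memset(ss->rx_done.entry, 0, bytes);
	ss->tx.req = 0;
	ss->tx.done = 0;
	ss->tx.pkt_start = 0;
	ss->tx.pkt_done = 0;
	ss->rx_big.cnt = 0;
	ss->rx_small.cnt = 0;
	ss->rx_done.idx = 0;
	ss->rx_done.cnt = 0;
	ss->tx.wake_queue = 0;
	ss->tx.stop_queue = 0;
}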
1187 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) in myri10ge_write_dca() argument
1189 ss->cached_dca_tag = tag; in myri10ge_write_dca()
1190 put_be32(htonl(tag), ss->dca_tag); in myri10ge_write_dca()
1193 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss) in myri10ge_update_dca() argument
1198 if (cpu != ss->cpu) { in myri10ge_update_dca()
1199 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu); in myri10ge_update_dca()
1200 if (ss->cached_dca_tag != tag) in myri10ge_update_dca()
1201 myri10ge_write_dca(ss, cpu, tag); in myri10ge_update_dca()
1202 ss->cpu = cpu; in myri10ge_update_dca()
1212 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled) in myri10ge_setup_dca()
1228 mgp->ss[i].cpu = -1; in myri10ge_setup_dca()
1229 mgp->ss[i].cached_dca_tag = -1; in myri10ge_setup_dca()
1230 myri10ge_update_dca(&mgp->ss[i]); in myri10ge_setup_dca()
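Lines 1187-1230 are the DCA (Direct Cache Access) plumbing: each slice caches the DCA tag it last wrote and re-tags only when its NAPI context migrates to a different CPU. A sketch reconstructed from the fragments; the get_cpu()/put_cpu() bracketing is an assumption not visible in the listing.

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		/* the slice moved to another CPU: fetch that CPU's DCA tag
		 * and push it to the NIC only if it actually changed */
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

myri10ge_setup_dca() (lines 1212-1230) primes this by setting cpu and cached_dca_tag to -1 on every slice, so the first myri10ge_update_dca() call always writes a tag.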
1419 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) in myri10ge_rx_done() argument
1421 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_rx_done()
1432 rx = &ss->rx_small; in myri10ge_rx_done()
1435 rx = &ss->rx_big; in myri10ge_rx_done()
1448 polling = myri10ge_ss_busy_polling(ss); in myri10ge_rx_done()
1452 skb = napi_get_frags(&ss->napi); in myri10ge_rx_done()
1454 ss->stats.rx_dropped++; in myri10ge_rx_done()
1490 skb_record_rx_queue(skb, ss - &mgp->ss[0]); in myri10ge_rx_done()
1491 skb_mark_napi_id(skb, &ss->napi); in myri10ge_rx_done()
1512 napi_gro_frags(&ss->napi); in myri10ge_rx_done()
1518 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) in myri10ge_tx_done() argument
1520 struct pci_dev *pdev = ss->mgp->pdev; in myri10ge_tx_done()
1521 struct myri10ge_tx_buf *tx = &ss->tx; in myri10ge_tx_done()
1540 ss->stats.tx_bytes += skb->len; in myri10ge_tx_done()
1541 ss->stats.tx_packets++; in myri10ge_tx_done()
1557 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss); in myri10ge_tx_done()
1567 if ((ss->mgp->dev->real_num_tx_queues > 1) && in myri10ge_tx_done()
1581 ss->mgp->running == MYRI10GE_ETH_RUNNING) { in myri10ge_tx_done()
1588 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) in myri10ge_clean_rx_done() argument
1590 struct myri10ge_rx_done *rx_done = &ss->rx_done; in myri10ge_clean_rx_done()
1591 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_clean_rx_done()
1605 rx_ok = myri10ge_rx_done(ss, length, checksum); in myri10ge_clean_rx_done()
1614 ss->stats.rx_packets += rx_packets; in myri10ge_clean_rx_done()
1615 ss->stats.rx_bytes += rx_bytes; in myri10ge_clean_rx_done()
1618 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1619 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_clean_rx_done()
1621 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1622 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); in myri10ge_clean_rx_done()
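Lines 1588-1622 are the receive-completion loop shared by the NAPI and busy-poll paths. A sketch reconstructed from the fragments; the completion-entry layout, the index wrap on mgp->max_intr_slots, and the small-buffer size arguments (cut off in the listing) are assumptions.

static int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;
	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	/* consume completions until the ring is empty or the budget is spent */
	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		rx_ok = myri10ge_rx_done(ss, length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	/* restock the receive rings when they run low; the small-buffer
	 * size arguments are assumed here */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}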
1629 struct mcp_irq_data *stats = mgp->ss[0].fw_stats; in myri10ge_check_statblock()
1664 struct myri10ge_slice_state *ss = in myri10ge_poll() local
1669 if (ss->mgp->dca_enabled) in myri10ge_poll()
1670 myri10ge_update_dca(ss); in myri10ge_poll()
1673 if (!myri10ge_ss_lock_napi(ss)) in myri10ge_poll()
1677 work_done = myri10ge_clean_rx_done(ss, budget); in myri10ge_poll()
1679 myri10ge_ss_unlock_napi(ss); in myri10ge_poll()
1682 put_be32(htonl(3), ss->irq_claim); in myri10ge_poll()
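Lines 1664-1682 are the NAPI poll handler. A sketch reconstructed from the fragments; the container_of() recovery of the slice, the CONFIG_MYRI10GE_DCA guard, and the napi_complete() call are assumptions consistent with how ss and budget are used in the listing.

static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif
	/* if a busy-polling socket owns the slice, claim the full budget so
	 * NAPI reschedules us instead of re-enabling the interrupt */
	if (!myri10ge_ss_lock_napi(ss))
		return budget;

	/* process receive completions up to the budget */
	work_done = myri10ge_clean_rx_done(ss, budget);

	myri10ge_ss_unlock_napi(ss);
	if (work_done < budget) {
		napi_complete(napi);
		/* re-arm the slice's interrupt in the NIC */
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}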
1690 struct myri10ge_slice_state *ss = in myri10ge_busy_poll() local
1692 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_busy_poll()
1699 if (!myri10ge_ss_lock_poll(ss)) in myri10ge_busy_poll()
1703 work_done = myri10ge_clean_rx_done(ss, 4); in myri10ge_busy_poll()
1705 ss->busy_poll_cnt += work_done; in myri10ge_busy_poll()
1707 ss->busy_poll_miss++; in myri10ge_busy_poll()
1709 myri10ge_ss_unlock_poll(ss); in myri10ge_busy_poll()
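Lines 1690-1709 are the ndo_busy_poll hook, which lets a socket poll the slice directly from process context with a small fixed budget. A sketch reconstructed from the fragments; the MYRI10GE_ETH_RUNNING check and the LL_FLUSH_* return codes are assumptions based on the busy-poll interface of that kernel generation.

static int myri10ge_busy_poll(struct napi_struct *napi)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	struct myri10ge_priv *mgp = ss->mgp;
	int work_done;

	/* the link must be up for a socket to poll it */
	if (mgp->running != MYRI10GE_ETH_RUNNING)
		return LL_FLUSH_FAILED;

	/* if NAPI already owns the slice, tell the caller to back off */
	if (!myri10ge_ss_lock_poll(ss))
		return LL_FLUSH_BUSY;

	/* a deliberately small budget keeps latency low for the polling socket */
	work_done = myri10ge_clean_rx_done(ss, 4);
	if (work_done)
		ss->busy_poll_cnt += work_done;
	else
		ss->busy_poll_miss++;

	myri10ge_ss_unlock_poll(ss);
	return work_done;
}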
1717 struct myri10ge_slice_state *ss = arg; in myri10ge_intr() local
1718 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_intr()
1719 struct mcp_irq_data *stats = ss->fw_stats; in myri10ge_intr()
1720 struct myri10ge_tx_buf *tx = &ss->tx; in myri10ge_intr()
1726 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { in myri10ge_intr()
1727 napi_schedule(&ss->napi); in myri10ge_intr()
1738 napi_schedule(&ss->napi); in myri10ge_intr()
1755 myri10ge_tx_done(ss, (int)send_done_count); in myri10ge_intr()
1768 if (ss == mgp->ss) in myri10ge_intr()
1771 put_be32(htonl(3), ss->irq_claim + 1); in myri10ge_intr()
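Lines 1717-1771 are the per-slice interrupt handler. The sketch below is condensed from the fragments: the INTx deassert handling and the loop that waits for the firmware to finish DMAing its statistics block are omitted, and the stats->valid handshake and stats field names are assumptions not visible in the listing.

static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;

	/* with a single tx queue, only slice 0 handles tx completions and
	 * firmware events; other slices just kick their rx NAPI context */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* the low bit of valid indicates pending receives */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	/* reap tx completions reported by the firmware */
	send_done_count = ntohl(stats->send_done_count);
	if (send_done_count != tx->pkt_done)
		myri10ge_tx_done(ss, (int)send_done_count);

	/* only slice 0 watches the global firmware statistics block */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	/* acknowledge the interrupt to the NIC */
	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}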
1885 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; in myri10ge_get_ringparam()
1886 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; in myri10ge_get_ringparam()
1888 ring->tx_max_pending = mgp->ss[0].tx.mask + 1; in myri10ge_get_ringparam()
1970 struct myri10ge_slice_state *ss; in myri10ge_get_ethtool_stats() local
1991 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); in myri10ge_get_ethtool_stats()
1997 ss = &mgp->ss[0]; in myri10ge_get_ethtool_stats()
1998 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); in myri10ge_get_ethtool_stats()
1999 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); in myri10ge_get_ethtool_stats()
2001 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); in myri10ge_get_ethtool_stats()
2002 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); in myri10ge_get_ethtool_stats()
2003 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); in myri10ge_get_ethtool_stats()
2004 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); in myri10ge_get_ethtool_stats()
2005 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); in myri10ge_get_ethtool_stats()
2007 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); in myri10ge_get_ethtool_stats()
2008 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); in myri10ge_get_ethtool_stats()
2009 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); in myri10ge_get_ethtool_stats()
2010 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); in myri10ge_get_ethtool_stats()
2011 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); in myri10ge_get_ethtool_stats()
2014 ss = &mgp->ss[slice]; in myri10ge_get_ethtool_stats()
2016 data[i++] = (unsigned int)ss->tx.pkt_start; in myri10ge_get_ethtool_stats()
2017 data[i++] = (unsigned int)ss->tx.pkt_done; in myri10ge_get_ethtool_stats()
2018 data[i++] = (unsigned int)ss->tx.req; in myri10ge_get_ethtool_stats()
2019 data[i++] = (unsigned int)ss->tx.done; in myri10ge_get_ethtool_stats()
2020 data[i++] = (unsigned int)ss->rx_small.cnt; in myri10ge_get_ethtool_stats()
2021 data[i++] = (unsigned int)ss->rx_big.cnt; in myri10ge_get_ethtool_stats()
2022 data[i++] = (unsigned int)ss->tx.wake_queue; in myri10ge_get_ethtool_stats()
2023 data[i++] = (unsigned int)ss->tx.stop_queue; in myri10ge_get_ethtool_stats()
2024 data[i++] = (unsigned int)ss->tx.linearized; in myri10ge_get_ethtool_stats()
2026 data[i++] = ss->lock_napi_yield; in myri10ge_get_ethtool_stats()
2027 data[i++] = ss->lock_poll_yield; in myri10ge_get_ethtool_stats()
2028 data[i++] = ss->busy_poll_miss; in myri10ge_get_ethtool_stats()
2029 data[i++] = ss->busy_poll_cnt; in myri10ge_get_ethtool_stats()
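Lines 1997-2029 show how myri10ge_get_ethtool_stats() emits counters: firmware-wide statistics are read once from slice 0's fw_stats block, then each slice contributes its own ring and busy-poll counters. A sketch of the per-slice loop reconstructed from the fragments; the mgp->num_slices bound and the CONFIG_NET_RX_BUSY_POLL guard around the last four counters are assumptions.

for (slice = 0; slice < mgp->num_slices; slice++) {
	ss = &mgp->ss[slice];
	data[i++] = (unsigned int)ss->tx.pkt_start;
	data[i++] = (unsigned int)ss->tx.pkt_done;
	data[i++] = (unsigned int)ss->tx.req;
	data[i++] = (unsigned int)ss->tx.done;
	data[i++] = (unsigned int)ss->rx_small.cnt;
	data[i++] = (unsigned int)ss->rx_big.cnt;
	data[i++] = (unsigned int)ss->tx.wake_queue;
	data[i++] = (unsigned int)ss->tx.stop_queue;
	data[i++] = (unsigned int)ss->tx.linearized;
#ifdef CONFIG_NET_RX_BUSY_POLL
	data[i++] = ss->lock_napi_yield;
	data[i++] = ss->lock_poll_yield;
	data[i++] = ss->busy_poll_miss;
	data[i++] = ss->busy_poll_cnt;
#endif
}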
2117 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) in myri10ge_allocate_rings() argument
2119 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_allocate_rings()
2128 slice = ss - mgp->ss; in myri10ge_allocate_rings()
2140 ss->tx.mask = tx_ring_entries - 1; in myri10ge_allocate_rings()
2141 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; in myri10ge_allocate_rings()
2148 * sizeof(*ss->tx.req_list); in myri10ge_allocate_rings()
2149 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2150 if (ss->tx.req_bytes == NULL) in myri10ge_allocate_rings()
2154 ss->tx.req_list = (struct mcp_kreq_ether_send *) in myri10ge_allocate_rings()
2155 ALIGN((unsigned long)ss->tx.req_bytes, 8); in myri10ge_allocate_rings()
2156 ss->tx.queue_active = 0; in myri10ge_allocate_rings()
2158 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); in myri10ge_allocate_rings()
2159 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2160 if (ss->rx_small.shadow == NULL) in myri10ge_allocate_rings()
2163 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); in myri10ge_allocate_rings()
2164 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2165 if (ss->rx_big.shadow == NULL) in myri10ge_allocate_rings()
2170 bytes = tx_ring_entries * sizeof(*ss->tx.info); in myri10ge_allocate_rings()
2171 ss->tx.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2172 if (ss->tx.info == NULL) in myri10ge_allocate_rings()
2175 bytes = rx_ring_entries * sizeof(*ss->rx_small.info); in myri10ge_allocate_rings()
2176 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2177 if (ss->rx_small.info == NULL) in myri10ge_allocate_rings()
2180 bytes = rx_ring_entries * sizeof(*ss->rx_big.info); in myri10ge_allocate_rings()
2181 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2182 if (ss->rx_big.info == NULL) in myri10ge_allocate_rings()
2186 ss->rx_big.cnt = 0; in myri10ge_allocate_rings()
2187 ss->rx_small.cnt = 0; in myri10ge_allocate_rings()
2188 ss->rx_big.fill_cnt = 0; in myri10ge_allocate_rings()
2189 ss->rx_small.fill_cnt = 0; in myri10ge_allocate_rings()
2190 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2191 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2192 ss->rx_small.watchdog_needed = 0; in myri10ge_allocate_rings()
2193 ss->rx_big.watchdog_needed = 0; in myri10ge_allocate_rings()
2195 ss->rx_small.fill_cnt = ss->rx_small.mask + 1; in myri10ge_allocate_rings()
2197 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_allocate_rings()
2201 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { in myri10ge_allocate_rings()
2203 slice, ss->rx_small.fill_cnt); in myri10ge_allocate_rings()
2207 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); in myri10ge_allocate_rings()
2208 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { in myri10ge_allocate_rings()
2210 slice, ss->rx_big.fill_cnt); in myri10ge_allocate_rings()
2217 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { in myri10ge_allocate_rings()
2218 int idx = i & ss->rx_big.mask; in myri10ge_allocate_rings()
2219 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], in myri10ge_allocate_rings()
2221 put_page(ss->rx_big.info[idx].page); in myri10ge_allocate_rings()
2226 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_allocate_rings()
2227 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_allocate_rings()
2228 int idx = i & ss->rx_small.mask; in myri10ge_allocate_rings()
2229 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_allocate_rings()
2231 put_page(ss->rx_small.info[idx].page); in myri10ge_allocate_rings()
2234 kfree(ss->rx_big.info); in myri10ge_allocate_rings()
2237 kfree(ss->rx_small.info); in myri10ge_allocate_rings()
2240 kfree(ss->tx.info); in myri10ge_allocate_rings()
2243 kfree(ss->rx_big.shadow); in myri10ge_allocate_rings()
2246 kfree(ss->rx_small.shadow); in myri10ge_allocate_rings()
2249 kfree(ss->tx.req_bytes); in myri10ge_allocate_rings()
2250 ss->tx.req_bytes = NULL; in myri10ge_allocate_rings()
2251 ss->tx.req_list = NULL; in myri10ge_allocate_rings()
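Lines 2140-2191 show the allocation ladder of myri10ge_allocate_rings(): ring sizes from the firmware become power-of-two index masks, the send request list is over-allocated so it can be 8-byte aligned, and the shadow/info arrays for each ring are kzalloc()ed in turn, with each failure unwinding through the goto chain at lines 2234-2251. A sketch of the opening steps; the request-list size formula, the MYRI10GE_MAX_SEND_DESC_TSO constant, and the label names are assumptions where the listing cuts them off.

	/* ring sizes are powers of two, so size - 1 works as an index mask */
	ss->tx.mask = tx_ring_entries - 1;
	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;

	/* the NIC requires the send request list to be 8-byte aligned, so
	 * over-allocate and align within the buffer; the multiplier is cut
	 * off in the listing and assumed here */
	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
		  * sizeof(*ss->tx.req_list);
	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.req_bytes == NULL)
		goto abort_with_nothing;
	ss->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
	ss->tx.queue_active = 0;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;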
2257 static void myri10ge_free_rings(struct myri10ge_slice_state *ss) in myri10ge_free_rings() argument
2259 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_free_rings()
2265 if (ss->tx.req_list == NULL) in myri10ge_free_rings()
2268 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { in myri10ge_free_rings()
2269 idx = i & ss->rx_big.mask; in myri10ge_free_rings()
2270 if (i == ss->rx_big.fill_cnt - 1) in myri10ge_free_rings()
2271 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_free_rings()
2272 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], in myri10ge_free_rings()
2274 put_page(ss->rx_big.info[idx].page); in myri10ge_free_rings()
2278 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_free_rings()
2279 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_free_rings()
2280 idx = i & ss->rx_small.mask; in myri10ge_free_rings()
2281 if (i == ss->rx_small.fill_cnt - 1) in myri10ge_free_rings()
2282 ss->rx_small.info[idx].page_offset = in myri10ge_free_rings()
2284 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_free_rings()
2286 put_page(ss->rx_small.info[idx].page); in myri10ge_free_rings()
2288 tx = &ss->tx; in myri10ge_free_rings()
2299 ss->stats.tx_dropped++; in myri10ge_free_rings()
2314 kfree(ss->rx_big.info); in myri10ge_free_rings()
2316 kfree(ss->rx_small.info); in myri10ge_free_rings()
2318 kfree(ss->tx.info); in myri10ge_free_rings()
2320 kfree(ss->rx_big.shadow); in myri10ge_free_rings()
2322 kfree(ss->rx_small.shadow); in myri10ge_free_rings()
2324 kfree(ss->tx.req_bytes); in myri10ge_free_rings()
2325 ss->tx.req_bytes = NULL; in myri10ge_free_rings()
2326 ss->tx.req_list = NULL; in myri10ge_free_rings()
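Lines 2259-2326 show myri10ge_free_rings() undoing the work above: every page still posted to the big and small receive rings is unmapped and released, in-flight tx buffers are dropped, and the arrays are freed. A sketch of the big-ring release loop reconstructed from the fragments; the size argument to myri10ge_unmap_rx_page() is cut off in the listing and assumed to be mgp->big_bytes.

	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		idx = i & ss->rx_big.mask;
		/* mark the last allocation block fully consumed so the unmap
		 * helper actually drops its DMA mapping */
		if (i == ss->rx_big.fill_cnt - 1)
			ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

The small-ring loop at lines 2278-2286 is identical apart from the ring and buffer size, and the fragments at 2288-2299 show the tx cleanup counting dropped frames into ss->stats.tx_dropped.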
2332 struct myri10ge_slice_state *ss; in myri10ge_request_irq() local
2364 ss = &mgp->ss[i]; in myri10ge_request_irq()
2365 snprintf(ss->irq_desc, sizeof(ss->irq_desc), in myri10ge_request_irq()
2368 myri10ge_intr, 0, ss->irq_desc, in myri10ge_request_irq()
2369 ss); in myri10ge_request_irq()
2376 &mgp->ss[i]); in myri10ge_request_irq()
2385 mgp->dev->name, &mgp->ss[0]); in myri10ge_request_irq()
2402 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); in myri10ge_free_irq()
2404 free_irq(pdev->irq, &mgp->ss[0]); in myri10ge_free_irq()
2415 struct myri10ge_slice_state *ss; in myri10ge_get_txrx() local
2418 ss = &mgp->ss[slice]; in myri10ge_get_txrx()
2424 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) in myri10ge_get_txrx()
2430 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
2435 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
2438 ss->tx.send_go = (__iomem __be32 *) in myri10ge_get_txrx()
2440 ss->tx.send_stop = (__iomem __be32 *) in myri10ge_get_txrx()
2449 struct myri10ge_slice_state *ss; in myri10ge_set_stats() local
2452 ss = &mgp->ss[slice]; in myri10ge_set_stats()
2453 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); in myri10ge_set_stats()
2454 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); in myri10ge_set_stats()
2458 dma_addr_t bus = ss->fw_stats_bus; in myri10ge_set_stats()
2477 struct myri10ge_slice_state *ss; in myri10ge_open() local
2571 ss = &mgp->ss[slice]; in myri10ge_open()
2578 status = myri10ge_allocate_rings(ss); in myri10ge_open()
2593 myri10ge_ss_init_lock(ss); in myri10ge_open()
2596 napi_enable(&(ss)->napi); in myri10ge_open()
2644 napi_disable(&mgp->ss[slice].napi); in myri10ge_open()
2647 myri10ge_free_rings(&mgp->ss[i]); in myri10ge_open()
2666 if (mgp->ss[0].tx.req_bytes == NULL) in myri10ge_close()
2673 napi_disable(&mgp->ss[i].napi); in myri10ge_close()
2678 while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { in myri10ge_close()
2703 myri10ge_free_rings(&mgp->ss[i]); in myri10ge_close()
2819 struct myri10ge_slice_state *ss; in myri10ge_xmit() local
2834 ss = &mgp->ss[queue]; in myri10ge_xmit()
2836 tx = &ss->tx; in myri10ge_xmit()
2913 ss->stats.tx_dropped += 1; in myri10ge_xmit()
3079 ss->stats.tx_dropped += 1; in myri10ge_xmit()
3089 struct myri10ge_slice_state *ss; in myri10ge_sw_tso() local
3116 ss = &mgp->ss[skb_get_queue_mapping(skb)]; in myri10ge_sw_tso()
3118 ss->stats.tx_dropped += 1; in myri10ge_sw_tso()
3130 slice_stats = &mgp->ss[i].stats; in myri10ge_get_stats()
3567 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, in myri10ge_check_slice() argument
3570 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_check_slice()
3571 int slice = ss - mgp->ss; in myri10ge_check_slice()
3573 if (ss->tx.req != ss->tx.done && in myri10ge_check_slice()
3574 ss->tx.done == ss->watchdog_tx_done && in myri10ge_check_slice()
3575 ss->watchdog_tx_req != ss->watchdog_tx_done) { in myri10ge_check_slice()
3584 slice, ss->tx.queue_active, ss->tx.req, in myri10ge_check_slice()
3585 ss->tx.done, ss->tx.pkt_start, in myri10ge_check_slice()
3586 ss->tx.pkt_done, in myri10ge_check_slice()
3587 (int)ntohl(mgp->ss[slice].fw_stats-> in myri10ge_check_slice()
3590 ss->stuck = 1; in myri10ge_check_slice()
3593 if (ss->watchdog_tx_done != ss->tx.done || in myri10ge_check_slice()
3594 ss->watchdog_rx_done != ss->rx_done.cnt) { in myri10ge_check_slice()
3597 ss->watchdog_tx_done = ss->tx.done; in myri10ge_check_slice()
3598 ss->watchdog_tx_req = ss->tx.req; in myri10ge_check_slice()
3599 ss->watchdog_rx_done = ss->rx_done.cnt; in myri10ge_check_slice()
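Lines 3567-3599 are the per-slice health check run from the watchdog paths. A sketch reconstructed from the fragments; the return type, the rx_pause_cnt parameter, the mgp->watchdog_pause comparison, and the log messages are assumptions where the listing cuts them off, and the firmware completion counter the driver also prints (the fw_stats field name is truncated at line 3587) is omitted here.

static void
myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
		     int *busy_slice_cnt, u32 rx_pause_cnt)
{
	struct myri10ge_priv *mgp = ss->mgp;
	int slice = ss - mgp->ss;

	/* the slice looks stuck when it has outstanding send requests but
	 * its done counter has not moved since the previous watchdog pass */
	if (ss->tx.req != ss->tx.done &&
	    ss->tx.done == ss->watchdog_tx_done &&
	    ss->watchdog_tx_req != ss->watchdog_tx_done) {
		/* mgp->watchdog_pause is assumed: a rise in pause frames
		 * explains the stall without needing a reset */
		if (rx_pause_cnt != mgp->watchdog_pause) {
			if (net_ratelimit())
				netdev_warn(mgp->dev,
					    "slice %d: TX paused, check link partner\n",
					    slice);
		} else {
			netdev_warn(mgp->dev,
				    "slice %d: TX stuck %d %d %d %d %d\n",
				    slice, ss->tx.queue_active, ss->tx.req,
				    ss->tx.done, ss->tx.pkt_start,
				    ss->tx.pkt_done);
			*reset_needed = 1;
			ss->stuck = 1;
		}
	}

	/* a slice that made any tx or rx progress counts as busy */
	if (ss->watchdog_tx_done != ss->tx.done ||
	    ss->watchdog_rx_done != ss->rx_done.cnt)
		(*busy_slice_cnt)++;

	ss->watchdog_tx_done = ss->tx.done;
	ss->watchdog_tx_req = ss->tx.req;
	ss->watchdog_rx_done = ss->rx_done.cnt;
}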
3610 struct myri10ge_slice_state *ss; in myri10ge_watchdog() local
3661 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); in myri10ge_watchdog()
3663 ss = mgp->ss; in myri10ge_watchdog()
3664 if (ss->stuck) { in myri10ge_watchdog()
3665 myri10ge_check_slice(ss, &reset_needed, in myri10ge_watchdog()
3668 ss->stuck = 0; in myri10ge_watchdog()
3701 struct myri10ge_slice_state *ss; in myri10ge_watchdog_timer() local
3708 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); in myri10ge_watchdog_timer()
3713 ss = &mgp->ss[i]; in myri10ge_watchdog_timer()
3714 if (ss->rx_small.watchdog_needed) { in myri10ge_watchdog_timer()
3715 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_watchdog_timer()
3718 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= in myri10ge_watchdog_timer()
3720 ss->rx_small.watchdog_needed = 0; in myri10ge_watchdog_timer()
3722 if (ss->rx_big.watchdog_needed) { in myri10ge_watchdog_timer()
3723 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, in myri10ge_watchdog_timer()
3725 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= in myri10ge_watchdog_timer()
3727 ss->rx_big.watchdog_needed = 0; in myri10ge_watchdog_timer()
3729 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, in myri10ge_watchdog_timer()
3754 struct myri10ge_slice_state *ss; in myri10ge_free_slices() local
3759 if (mgp->ss == NULL) in myri10ge_free_slices()
3763 ss = &mgp->ss[i]; in myri10ge_free_slices()
3764 if (ss->rx_done.entry != NULL) { in myri10ge_free_slices()
3766 sizeof(*ss->rx_done.entry); in myri10ge_free_slices()
3768 ss->rx_done.entry, ss->rx_done.bus); in myri10ge_free_slices()
3769 ss->rx_done.entry = NULL; in myri10ge_free_slices()
3771 if (ss->fw_stats != NULL) { in myri10ge_free_slices()
3772 bytes = sizeof(*ss->fw_stats); in myri10ge_free_slices()
3774 ss->fw_stats, ss->fw_stats_bus); in myri10ge_free_slices()
3775 ss->fw_stats = NULL; in myri10ge_free_slices()
3777 napi_hash_del(&ss->napi); in myri10ge_free_slices()
3778 netif_napi_del(&ss->napi); in myri10ge_free_slices()
3782 kfree(mgp->ss); in myri10ge_free_slices()
3783 mgp->ss = NULL; in myri10ge_free_slices()
3788 struct myri10ge_slice_state *ss; in myri10ge_alloc_slices() local
3793 bytes = sizeof(*mgp->ss) * mgp->num_slices; in myri10ge_alloc_slices()
3794 mgp->ss = kzalloc(bytes, GFP_KERNEL); in myri10ge_alloc_slices()
3795 if (mgp->ss == NULL) { in myri10ge_alloc_slices()
3800 ss = &mgp->ss[i]; in myri10ge_alloc_slices()
3801 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); in myri10ge_alloc_slices()
3802 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, in myri10ge_alloc_slices()
3803 &ss->rx_done.bus, in myri10ge_alloc_slices()
3805 if (ss->rx_done.entry == NULL) in myri10ge_alloc_slices()
3807 bytes = sizeof(*ss->fw_stats); in myri10ge_alloc_slices()
3808 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, in myri10ge_alloc_slices()
3809 &ss->fw_stats_bus, in myri10ge_alloc_slices()
3811 if (ss->fw_stats == NULL) in myri10ge_alloc_slices()
3813 ss->mgp = mgp; in myri10ge_alloc_slices()
3814 ss->dev = mgp->dev; in myri10ge_alloc_slices()
3815 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, in myri10ge_alloc_slices()
3817 napi_hash_add(&ss->napi); in myri10ge_alloc_slices()
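Lines 3788-3817 show myri10ge_alloc_slices() building the per-slice state that everything above relies on. A sketch reconstructed from the fragments; the NAPI weight argument (cut off at line 3815) and the unwind path are assumptions.

static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	bytes = sizeof(*mgp->ss) * mgp->num_slices;
	mgp->ss = kzalloc(bytes, GFP_KERNEL);
	if (mgp->ss == NULL)
		return -ENOMEM;

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		/* receive-completion ring, written by the NIC via DMA */
		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
		ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
							&ss->rx_done.bus,
							GFP_KERNEL);
		if (ss->rx_done.entry == NULL)
			goto abort;

		/* per-slice firmware statistics block */
		bytes = sizeof(*ss->fw_stats);
		ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
						  &ss->fw_stats_bus,
						  GFP_KERNEL);
		if (ss->fw_stats == NULL)
			goto abort;

		ss->mgp = mgp;
		ss->dev = mgp->dev;
		/* the weight argument is cut off in the listing; the driver's
		 * module parameter is assumed here */
		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
			       myri10ge_napi_weight);
		napi_hash_add(&ss->napi);
	}
	return 0;

abort:
	/* myri10ge_free_slices() (lines 3754-3783) releases whatever was
	 * allocated before the failure */
	myri10ge_free_slices(mgp);
	return -ENOMEM;
}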