Lines matching refs:adapter — cross-reference hits for the identifier adapter in the Linux vmxnet3 driver (drivers/net/vmxnet3/vmxnet3_drv.c). Each entry gives the source line number, the matching fragment, and the enclosing function.

49 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
55 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) in vmxnet3_enable_intr() argument
57 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); in vmxnet3_enable_intr()
62 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) in vmxnet3_disable_intr() argument
64 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); in vmxnet3_disable_intr()
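
The two fragments above give the whole per-vector masking story: each interrupt vector owns an 8-byte-strided mask register (IMR) in BAR0, and writing 0 unmasks while writing 1 masks. A minimal sketch reconstructed from those fragments (only the comments are added):

static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        /* each vector's mask register sits at REG_IMR + intr_idx * 8; 0 = unmask */
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}

static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        /* 1 = mask the vector */
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
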
72 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) in vmxnet3_enable_all_intrs() argument
76 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_enable_all_intrs()
77 vmxnet3_enable_intr(adapter, i); in vmxnet3_enable_all_intrs()
78 adapter->shared->devRead.intrConf.intrCtrl &= in vmxnet3_enable_all_intrs()
84 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) in vmxnet3_disable_all_intrs() argument
88 adapter->shared->devRead.intrConf.intrCtrl |= in vmxnet3_disable_all_intrs()
90 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_disable_all_intrs()
91 vmxnet3_disable_intr(adapter, i); in vmxnet3_disable_all_intrs()
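
The all-vectors variants pair those per-vector writes with the intrCtrl word in the shared memory area, so the device also sees a global disable bit. Note the ordering visible in the fragments: enable unmasks first and clears the bit last, disable sets the bit first and masks last. Reconstructed (VMXNET3_IC_DISABLE_ALL is the driver's own constant):

static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_enable_intr(adapter, i);
        /* clear the global disable bit in the shared area last */
        adapter->shared->devRead.intrConf.intrCtrl &=
                                cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}

static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        /* set the global disable bit first, then mask every vector */
        adapter->shared->devRead.intrConf.intrCtrl |=
                                cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_disable_intr(adapter, i);
}
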
96 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) in vmxnet3_ack_events() argument
98 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); in vmxnet3_ack_events()
103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stopped() argument
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_start() argument
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_wake() argument
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stop() argument
130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
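
These helpers wrap the stacked subqueue API; the queue index is never stored in the queue itself but recovered by pointer arithmetic, tq - adapter->tx_queue, against the per-adapter array base. A sketch of the wake helper (the tq->stopped flag is an assumption suggested by vmxnet3_tq_stopped() above):

static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;    /* assumed per-queue flag, cf. vmxnet3_tq_stopped() */
        /* pointer arithmetic yields the index within adapter->tx_queue[] */
        netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
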
138 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) in vmxnet3_check_link() argument
144 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_check_link()
145 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); in vmxnet3_check_link()
146 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_check_link()
147 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_check_link()
149 adapter->link_speed = ret >> 16; in vmxnet3_check_link()
151 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", in vmxnet3_check_link()
152 adapter->link_speed); in vmxnet3_check_link()
153 netif_carrier_on(adapter->netdev); in vmxnet3_check_link()
156 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
157 vmxnet3_tq_start(&adapter->tx_queue[i], in vmxnet3_check_link()
158 adapter); in vmxnet3_check_link()
161 netdev_info(adapter->netdev, "NIC Link is Down\n"); in vmxnet3_check_link()
162 netif_carrier_off(adapter->netdev); in vmxnet3_check_link()
165 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
166 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); in vmxnet3_check_link()
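
vmxnet3_check_link() shows the command protocol used throughout the driver: write a command code to VMXNET3_REG_CMD in BAR1 under cmd_lock, then read the same register back for the result. For GET_LINK the upper 16 bits of the result carry the speed in Mbps, and (judging by the carrier calls above) the low bit carries link state. A hedged sketch:

static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
        u32 ret;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);

        adapter->link_speed = ret >> 16;        /* speed in Mbps */
        if (ret & 1) {                          /* assumed link-up bit */
                netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
                            adapter->link_speed);
                netif_carrier_on(adapter->netdev);
                if (affectTxQueue)
                        for (i = 0; i < adapter->num_tx_queues; i++)
                                vmxnet3_tq_start(&adapter->tx_queue[i], adapter);
        } else {
                netdev_info(adapter->netdev, "NIC Link is Down\n");
                netif_carrier_off(adapter->netdev);
                if (affectTxQueue)
                        for (i = 0; i < adapter->num_tx_queues; i++)
                                vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
        }
}
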
172 vmxnet3_process_events(struct vmxnet3_adapter *adapter) in vmxnet3_process_events() argument
176 u32 events = le32_to_cpu(adapter->shared->ecr); in vmxnet3_process_events()
180 vmxnet3_ack_events(adapter, events); in vmxnet3_process_events()
184 vmxnet3_check_link(adapter, true); in vmxnet3_process_events()
188 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_process_events()
189 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_process_events()
191 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_process_events()
193 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_process_events()
194 if (adapter->tqd_start[i].status.stopped) in vmxnet3_process_events()
195 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
197 adapter->netdev->name, i, le32_to_cpu( in vmxnet3_process_events()
198 adapter->tqd_start[i].status.error)); in vmxnet3_process_events()
199 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_process_events()
200 if (adapter->rqd_start[i].status.stopped) in vmxnet3_process_events()
201 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
203 adapter->netdev->name, i, in vmxnet3_process_events()
204 adapter->rqd_start[i].status.error); in vmxnet3_process_events()
206 schedule_work(&adapter->work); in vmxnet3_process_events()
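
vmxnet3_process_events() reads the event bitmap from the shared area's ecr field, acknowledges it by writing VMXNET3_REG_ECR (via vmxnet3_ack_events() above), and dispatches: a link event re-runs vmxnet3_check_link(), and queue-error events fetch queue status, log any stopped queues, and schedule the reset worker. A sketch of the control flow (the VMXNET3_ECR_* bit names come from the driver headers, not these fragments):

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
        unsigned long flags;
        u32 events = le32_to_cpu(adapter->shared->ecr);

        if (!events)
                return;
        vmxnet3_ack_events(adapter, events);    /* clear via REG_ECR write */

        if (events & VMXNET3_ECR_LINK)
                vmxnet3_check_link(adapter, true);

        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
                spin_lock_irqsave(&adapter->cmd_lock, flags);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);
                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
                /* ... log any tqd_start[i]/rqd_start[i].status.stopped ... */
                schedule_work(&adapter->work);  /* defer the actual reset */
        }
}
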
330 struct pci_dev *pdev, struct vmxnet3_adapter *adapter) in vmxnet3_unmap_pkt() argument
365 struct vmxnet3_adapter *adapter) in vmxnet3_tq_tx_complete() argument
373 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
374 adapter); in vmxnet3_tq_tx_complete()
382 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && in vmxnet3_tq_tx_complete()
385 netif_carrier_ok(adapter->netdev))) { in vmxnet3_tq_tx_complete()
386 vmxnet3_tq_wake(tq, adapter); in vmxnet3_tq_tx_complete()
396 struct vmxnet3_adapter *adapter) in vmxnet3_tq_cleanup() argument
405 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); in vmxnet3_tq_cleanup()
429 struct vmxnet3_adapter *adapter) in vmxnet3_tq_destroy() argument
432 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
438 dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * in vmxnet3_tq_destroy()
444 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
450 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
460 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter) in vmxnet3_tq_destroy_all() argument
464 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_destroy_all()
465 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); in vmxnet3_tq_destroy_all()
471 struct vmxnet3_adapter *adapter) in vmxnet3_tq_init() argument
501 struct vmxnet3_adapter *adapter) in vmxnet3_tq_create() argument
508 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
512 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); in vmxnet3_tq_create()
516 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
520 netdev_err(adapter->netdev, "failed to allocate data ring\n"); in vmxnet3_tq_create()
524 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
528 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); in vmxnet3_tq_create()
533 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_tq_create()
541 vmxnet3_tq_destroy(tq, adapter); in vmxnet3_tq_create()
546 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter) in vmxnet3_tq_cleanup_all() argument
550 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_cleanup_all()
551 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); in vmxnet3_tq_cleanup_all()
562 int num_to_alloc, struct vmxnet3_adapter *adapter) in vmxnet3_rq_alloc_rx_buf() argument
578 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
587 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
590 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
611 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
614 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
640 netdev_dbg(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
672 struct vmxnet3_adapter *adapter) in vmxnet3_map_pkt() argument
699 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
726 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, in vmxnet3_map_pkt()
729 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
741 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
768 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, in vmxnet3_map_pkt()
771 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
783 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
807 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) in vmxnet3_tq_init_all() argument
811 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_init_all()
812 vmxnet3_tq_init(&adapter->tx_queue[i], adapter); in vmxnet3_tq_init_all()
836 struct vmxnet3_adapter *adapter) in vmxnet3_parse_and_copy_hdr() argument
898 netdev_dbg(adapter->netdev, in vmxnet3_parse_and_copy_hdr()
956 struct vmxnet3_adapter *adapter, struct net_device *netdev) in vmxnet3_tq_xmit() argument
1005 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1007 " next2fill %u\n", adapter->netdev->name, in vmxnet3_tq_xmit()
1010 vmxnet3_tq_stop(tq, adapter); in vmxnet3_tq_xmit()
1016 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1040 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1089 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1100 VMXNET3_WRITE_BAR0_REG(adapter, in vmxnet3_tq_xmit()
1121 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_xmit_frame() local
1123 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); in vmxnet3_xmit_frame()
1125 &adapter->tx_queue[skb->queue_mapping], in vmxnet3_xmit_frame()
1126 adapter, netdev); in vmxnet3_xmit_frame()
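
The hot-path entry point is thin: ndo_start_xmit just maps skb->queue_mapping to one of the per-adapter tx queues and hands off to vmxnet3_tq_xmit(). Reconstructed from the fragments above:

static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
        return vmxnet3_tq_xmit(skb,
                               &adapter->tx_queue[skb->queue_mapping],
                               adapter, netdev);
}
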
1131 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, in vmxnet3_rx_csum() argument
1135 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { in vmxnet3_rx_csum()
1159 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) in vmxnet3_rx_error() argument
1185 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, in vmxnet3_get_hdr_len() argument
1232 struct vmxnet3_adapter *adapter, int quota) in vmxnet3_rq_rx_complete() argument
1265 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; in vmxnet3_rq_rx_complete()
1275 vmxnet3_rx_error(rq, rcd, ctx, adapter); in vmxnet3_rq_rx_complete()
1289 netdev_dbg(adapter->netdev, in vmxnet3_rq_rx_complete()
1297 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_rx_complete()
1309 new_dma_addr = dma_map_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1312 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1325 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, in vmxnet3_rq_rx_complete()
1331 (adapter->netdev->features & NETIF_F_RXHASH)) in vmxnet3_rq_rx_complete()
1343 if (adapter->version == 2 && in vmxnet3_rq_rx_complete()
1383 new_dma_addr = dma_map_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1387 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1397 dma_unmap_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1414 u32 mtu = adapter->netdev->mtu; in vmxnet3_rq_rx_complete()
1417 vmxnet3_rx_csum(adapter, skb, in vmxnet3_rq_rx_complete()
1419 skb->protocol = eth_type_trans(skb, adapter->netdev); in vmxnet3_rq_rx_complete()
1420 if (!rcd->tcp || !adapter->lro) in vmxnet3_rq_rx_complete()
1431 hlen = vmxnet3_get_hdr_len(adapter, skb, in vmxnet3_rq_rx_complete()
1451 if (adapter->netdev->features & NETIF_F_LRO) in vmxnet3_rq_rx_complete()
1478 VMXNET3_WRITE_BAR0_REG(adapter, in vmxnet3_rq_rx_complete()
1494 struct vmxnet3_adapter *adapter) in vmxnet3_rq_cleanup() argument
1509 dma_unmap_single(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
1515 dma_unmap_page(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
1533 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_cleanup_all() argument
1537 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_cleanup_all()
1538 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); in vmxnet3_rq_cleanup_all()
1543 struct vmxnet3_adapter *adapter) in vmxnet3_rq_destroy() argument
1559 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
1570 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size in vmxnet3_rq_destroy()
1579 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], in vmxnet3_rq_destroy()
1587 struct vmxnet3_adapter *adapter) in vmxnet3_rq_init() argument
1595 if (i % adapter->rx_buf_per_pkt == 0) { in vmxnet3_rq_init()
1597 rq->buf_info[0][i].len = adapter->skb_buf_size; in vmxnet3_rq_init()
1617 adapter) == 0) { in vmxnet3_rq_init()
1621 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); in vmxnet3_rq_init()
1638 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_init_all() argument
1642 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_init_all()
1643 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); in vmxnet3_rq_init_all()
1645 dev_err(&adapter->netdev->dev, "%s: failed to " in vmxnet3_rq_init_all()
1647 adapter->netdev->name, i); in vmxnet3_rq_init_all()
1657 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) in vmxnet3_rq_create() argument
1667 &adapter->pdev->dev, sz, in vmxnet3_rq_create()
1671 netdev_err(adapter->netdev, in vmxnet3_rq_create()
1678 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
1682 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); in vmxnet3_rq_create()
1688 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, in vmxnet3_rq_create()
1699 vmxnet3_rq_destroy(rq, adapter); in vmxnet3_rq_create()
1705 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_create_all() argument
1709 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_create_all()
1710 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); in vmxnet3_rq_create_all()
1712 dev_err(&adapter->netdev->dev, in vmxnet3_rq_create_all()
1714 adapter->netdev->name, i); in vmxnet3_rq_create_all()
1720 vmxnet3_rq_destroy_all(adapter); in vmxnet3_rq_create_all()
1728 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) in vmxnet3_do_poll() argument
1731 if (unlikely(adapter->shared->ecr)) in vmxnet3_do_poll()
1732 vmxnet3_process_events(adapter); in vmxnet3_do_poll()
1733 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_do_poll()
1734 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); in vmxnet3_do_poll()
1736 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_do_poll()
1737 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], in vmxnet3_do_poll()
1738 adapter, budget); in vmxnet3_do_poll()
1750 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); in vmxnet3_poll()
1754 vmxnet3_enable_all_intrs(rx_queue->adapter); in vmxnet3_poll()
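
The non-MSI-X NAPI handler funnels everything through vmxnet3_do_poll(), which (per the fragments above) drains pending events, completes every tx queue, and spends the budget across the rx queues; when the budget is not exhausted it leaves polling mode and re-arms all interrupts. Sketch of the poll callback:

static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
        struct vmxnet3_rx_queue *rx_queue = container_of(napi,
                                        struct vmxnet3_rx_queue, napi);
        int rxd_done;

        rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

        if (rxd_done < budget) {
                /* all work done: exit polling mode and unmask interrupts */
                napi_complete(napi);
                vmxnet3_enable_all_intrs(rx_queue->adapter);
        }
        return rxd_done;
}
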
1769 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_poll_rx_only() local
1775 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_poll_rx_only()
1777 &adapter->tx_queue[rq - adapter->rx_queue]; in vmxnet3_poll_rx_only()
1778 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_poll_rx_only()
1781 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); in vmxnet3_poll_rx_only()
1785 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_poll_rx_only()
1802 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx() local
1804 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_tx()
1805 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
1808 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_msix_tx()
1810 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_msix_tx()
1811 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx()
1812 vmxnet3_tq_tx_complete(txq, adapter); in vmxnet3_msix_tx()
1815 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_msix_tx()
1817 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
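
The MSI-X tx handler masks its own vector when the device is in auto-mask mode (VMXNET3_IMM_ACTIVE), completes either its own queue or, under VMXNET3_INTR_TXSHARE, every tx queue sharing the single tx vector, and unmasks on the way out. Sketch from the fragments:

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
        struct vmxnet3_tx_queue *tq = data;
        struct vmxnet3_adapter *adapter = tq->adapter;

        if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
                vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

        if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
                int i;
                /* one vector serves all tx queues: complete each of them */
                for (i = 0; i < adapter->num_tx_queues; i++)
                        vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
        } else {
                vmxnet3_tq_tx_complete(tq, adapter);
        }
        vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

        return IRQ_HANDLED;
}
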
1832 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_msix_rx() local
1835 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_rx()
1836 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_msix_rx()
1859 struct vmxnet3_adapter *adapter = netdev_priv(dev); in vmxnet3_msix_event() local
1862 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_event()
1863 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
1865 if (adapter->shared->ecr) in vmxnet3_msix_event()
1866 vmxnet3_process_events(adapter); in vmxnet3_msix_event()
1868 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
1881 struct vmxnet3_adapter *adapter = netdev_priv(dev); in vmxnet3_intr() local
1883 if (adapter->intr.type == VMXNET3_IT_INTX) { in vmxnet3_intr()
1884 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); in vmxnet3_intr()
1892 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_intr()
1893 vmxnet3_disable_all_intrs(adapter); in vmxnet3_intr()
1895 napi_schedule(&adapter->rx_queue[0].napi); in vmxnet3_intr()
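
The legacy/MSI handler must first check ownership: for INTx, the ICR register reads 0 when the interrupt is not ours. It then masks everything (in auto-mask mode) and defers all work to NAPI on rx queue 0, which is why the non-MSI-X paths force num_rx_queues to 1 elsewhere in this listing. Sketch:

static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct vmxnet3_adapter *adapter = netdev_priv(dev);

        if (adapter->intr.type == VMXNET3_IT_INTX) {
                u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
                if (unlikely(icr == 0))
                        return IRQ_NONE;        /* shared line, not ours */
        }

        if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
                vmxnet3_disable_all_intrs(adapter);

        napi_schedule(&adapter->rx_queue[0].napi);

        return IRQ_HANDLED;
}
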
1906 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_netpoll() local
1908 switch (adapter->intr.type) { in vmxnet3_netpoll()
1912 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_netpoll()
1913 vmxnet3_msix_rx(0, &adapter->rx_queue[i]); in vmxnet3_netpoll()
1919 vmxnet3_intr(0, adapter->netdev); in vmxnet3_netpoll()
1927 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) in vmxnet3_request_irqs() argument
1929 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_request_irqs()
1934 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
1935 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_request_irqs()
1936 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_request_irqs()
1937 sprintf(adapter->tx_queue[i].name, "%s-tx-%d", in vmxnet3_request_irqs()
1938 adapter->netdev->name, vector); in vmxnet3_request_irqs()
1942 adapter->tx_queue[i].name, in vmxnet3_request_irqs()
1943 &adapter->tx_queue[i]); in vmxnet3_request_irqs()
1945 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
1946 adapter->netdev->name, vector); in vmxnet3_request_irqs()
1949 dev_err(&adapter->netdev->dev, in vmxnet3_request_irqs()
1952 adapter->tx_queue[i].name, err); in vmxnet3_request_irqs()
1958 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_request_irqs()
1959 for (; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
1960 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
1965 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
1969 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
1972 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
1973 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
1974 sprintf(adapter->rx_queue[i].name, "%s-rx-%d", in vmxnet3_request_irqs()
1975 adapter->netdev->name, vector); in vmxnet3_request_irqs()
1977 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
1978 adapter->netdev->name, vector); in vmxnet3_request_irqs()
1981 adapter->rx_queue[i].name, in vmxnet3_request_irqs()
1982 &(adapter->rx_queue[i])); in vmxnet3_request_irqs()
1984 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
1987 adapter->rx_queue[i].name, err); in vmxnet3_request_irqs()
1991 adapter->rx_queue[i].comp_ring.intr_idx = vector++; in vmxnet3_request_irqs()
1995 adapter->netdev->name, vector); in vmxnet3_request_irqs()
1998 intr->event_msi_vector_name, adapter->netdev); in vmxnet3_request_irqs()
2002 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2003 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, in vmxnet3_request_irqs()
2004 adapter->netdev->name, adapter->netdev); in vmxnet3_request_irqs()
2007 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2008 err = request_irq(adapter->pdev->irq, vmxnet3_intr, in vmxnet3_request_irqs()
2009 IRQF_SHARED, adapter->netdev->name, in vmxnet3_request_irqs()
2010 adapter->netdev); in vmxnet3_request_irqs()
2016 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2021 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2022 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_request_irqs()
2024 rq->qid2 = i + adapter->num_rx_queues; in vmxnet3_request_irqs()
2032 if (adapter->intr.type != VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2033 adapter->intr.event_intr_idx = 0; in vmxnet3_request_irqs()
2034 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2035 adapter->tx_queue[i].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2036 adapter->rx_queue[0].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2039 netdev_info(adapter->netdev, in vmxnet3_request_irqs()
2049 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) in vmxnet3_free_irqs() argument
2051 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_free_irqs()
2060 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_free_irqs()
2061 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_free_irqs()
2063 &(adapter->tx_queue[i])); in vmxnet3_free_irqs()
2064 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) in vmxnet3_free_irqs()
2069 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_free_irqs()
2071 &(adapter->rx_queue[i])); in vmxnet3_free_irqs()
2075 adapter->netdev); in vmxnet3_free_irqs()
2081 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2084 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2093 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) in vmxnet3_restore_vlan() argument
2095 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_restore_vlan()
2101 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in vmxnet3_restore_vlan()
2109 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_vlan_rx_add_vid() local
2112 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_add_vid()
2116 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2117 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_vlan_rx_add_vid()
2119 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2122 set_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_add_vid()
2131 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_vlan_rx_kill_vid() local
2134 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_kill_vid()
2138 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2139 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_vlan_rx_kill_vid()
2141 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2144 clear_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_kill_vid()
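
The VLAN ndo hooks edit the vfTable bitmap that lives in the shared devRead area, poke the device with VMXNET3_CMD_UPDATE_VLAN_FILTERS under cmd_lock, and mirror the change in the driver's active_vlans bitmap so vmxnet3_restore_vlan() can replay it after a reset. A sketch of the add path (the VMXNET3_SET_VFTABLE_ENTRY macro and the IFF_PROMISC guard are assumptions consistent with the fragments):

static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        if (!(netdev->flags & IFF_PROMISC)) {
                u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
                unsigned long flags;

                VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);  /* set bit in shared table */
                spin_lock_irqsave(&adapter->cmd_lock, flags);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        }

        set_bit(vid, adapter->active_vlans);    /* remembered for restore_vlan() */

        return 0;
}
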
2176 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_set_mc() local
2179 &adapter->shared->devRead.rxFilterConf; in vmxnet3_set_mc()
2185 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_set_mc()
2190 vmxnet3_restore_vlan(adapter); in vmxnet3_set_mc()
2206 &adapter->pdev->dev, in vmxnet3_set_mc()
2212 if (!dma_mapping_error(&adapter->pdev->dev, in vmxnet3_set_mc()
2228 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2231 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_set_mc()
2233 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_set_mc()
2237 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_set_mc()
2239 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2242 dma_unmap_single(&adapter->pdev->dev, new_table_pa, in vmxnet3_set_mc()
2248 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_destroy_all() argument
2252 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_destroy_all()
2253 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); in vmxnet3_rq_destroy_all()
2262 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) in vmxnet3_setup_driver_shared() argument
2264 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_setup_driver_shared()
2284 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); in vmxnet3_setup_driver_shared()
2288 if (adapter->netdev->features & NETIF_F_RXCSUM) in vmxnet3_setup_driver_shared()
2291 if (adapter->netdev->features & NETIF_F_LRO) { in vmxnet3_setup_driver_shared()
2295 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in vmxnet3_setup_driver_shared()
2298 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); in vmxnet3_setup_driver_shared()
2299 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); in vmxnet3_setup_driver_shared()
2301 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + in vmxnet3_setup_driver_shared()
2302 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); in vmxnet3_setup_driver_shared()
2305 devRead->misc.numTxQueues = adapter->num_tx_queues; in vmxnet3_setup_driver_shared()
2306 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_setup_driver_shared()
2307 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared()
2308 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); in vmxnet3_setup_driver_shared()
2309 tqc = &adapter->tqd_start[i].conf; in vmxnet3_setup_driver_shared()
2324 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2325 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_setup_driver_shared()
2326 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_setup_driver_shared()
2327 rqc = &adapter->rqd_start[i].conf; in vmxnet3_setup_driver_shared()
2343 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); in vmxnet3_setup_driver_shared()
2345 if (adapter->rss) { in vmxnet3_setup_driver_shared()
2346 struct UPT1_RSSConf *rssConf = adapter->rss_conf; in vmxnet3_setup_driver_shared()
2349 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2361 i, adapter->num_rx_queues); in vmxnet3_setup_driver_shared()
2366 cpu_to_le64(adapter->rss_conf_pa); in vmxnet3_setup_driver_shared()
2372 devRead->intrConf.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2374 devRead->intrConf.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2375 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2376 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2378 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
2383 vmxnet3_restore_vlan(adapter); in vmxnet3_setup_driver_shared()
2384 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); in vmxnet3_setup_driver_shared()
2391 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) in vmxnet3_activate_dev() argument
2397 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," in vmxnet3_activate_dev()
2398 " ring sizes %u %u %u\n", adapter->netdev->name, in vmxnet3_activate_dev()
2399 adapter->skb_buf_size, adapter->rx_buf_per_pkt, in vmxnet3_activate_dev()
2400 adapter->tx_queue[0].tx_ring.size, in vmxnet3_activate_dev()
2401 adapter->rx_queue[0].rx_ring[0].size, in vmxnet3_activate_dev()
2402 adapter->rx_queue[0].rx_ring[1].size); in vmxnet3_activate_dev()
2404 vmxnet3_tq_init_all(adapter); in vmxnet3_activate_dev()
2405 err = vmxnet3_rq_init_all(adapter); in vmxnet3_activate_dev()
2407 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
2412 err = vmxnet3_request_irqs(adapter); in vmxnet3_activate_dev()
2414 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
2419 vmxnet3_setup_driver_shared(adapter); in vmxnet3_activate_dev()
2421 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO( in vmxnet3_activate_dev()
2422 adapter->shared_pa)); in vmxnet3_activate_dev()
2423 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( in vmxnet3_activate_dev()
2424 adapter->shared_pa)); in vmxnet3_activate_dev()
2425 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
2426 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_activate_dev()
2428 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_activate_dev()
2429 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
2432 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
2438 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_activate_dev()
2439 VMXNET3_WRITE_BAR0_REG(adapter, in vmxnet3_activate_dev()
2441 adapter->rx_queue[i].rx_ring[0].next2fill); in vmxnet3_activate_dev()
2442 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 + in vmxnet3_activate_dev()
2444 adapter->rx_queue[i].rx_ring[1].next2fill); in vmxnet3_activate_dev()
2448 vmxnet3_set_mc(adapter->netdev); in vmxnet3_activate_dev()
2454 vmxnet3_check_link(adapter, true); in vmxnet3_activate_dev()
2455 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_activate_dev()
2456 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_activate_dev()
2457 vmxnet3_enable_all_intrs(adapter); in vmxnet3_activate_dev()
2458 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_activate_dev()
2462 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); in vmxnet3_activate_dev()
2463 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); in vmxnet3_activate_dev()
2464 vmxnet3_free_irqs(adapter); in vmxnet3_activate_dev()
2468 vmxnet3_rq_cleanup_all(adapter); in vmxnet3_activate_dev()
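
Activation is a fixed handshake visible in the fragments above: init the queues, request IRQs, fill in the shared area, hand its physical address to the device via the DSAL/DSAH register pair, issue VMXNET3_CMD_ACTIVATE_DEV, and check the value read back from the CMD register (non-zero means failure, after which DSAL/DSAH are cleared and resources unwound). A condensed sketch; vmxnet3_do_activate is a hypothetical name for the middle of vmxnet3_activate_dev(), with the unwinding elided:

static int
vmxnet3_do_activate(struct vmxnet3_adapter *adapter)    /* hypothetical helper */
{
        unsigned long flags;
        u32 ret;

        vmxnet3_setup_driver_shared(adapter);

        /* hand the shared area's physical address to the device */
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
                               VMXNET3_GET_ADDR_LO(adapter->shared_pa));
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
                               VMXNET3_GET_ADDR_HI(adapter->shared_pa));

        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_ACTIVATE_DEV);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);

        if (ret != 0) {
                netdev_err(adapter->netdev,
                           "Failed to activate dev: error %u\n", ret);
                return -EINVAL;   /* caller clears DSAL/DSAH and frees irqs */
        }
        return 0;
}
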
2474 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) in vmxnet3_reset_dev() argument
2477 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
2478 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); in vmxnet3_reset_dev()
2479 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
2484 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) in vmxnet3_quiesce_dev() argument
2488 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) in vmxnet3_quiesce_dev()
2492 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
2493 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_quiesce_dev()
2495 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
2496 vmxnet3_disable_all_intrs(adapter); in vmxnet3_quiesce_dev()
2498 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_quiesce_dev()
2499 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_quiesce_dev()
2500 netif_tx_disable(adapter->netdev); in vmxnet3_quiesce_dev()
2501 adapter->link_speed = 0; in vmxnet3_quiesce_dev()
2502 netif_carrier_off(adapter->netdev); in vmxnet3_quiesce_dev()
2504 vmxnet3_tq_cleanup_all(adapter); in vmxnet3_quiesce_dev()
2505 vmxnet3_rq_cleanup_all(adapter); in vmxnet3_quiesce_dev()
2506 vmxnet3_free_irqs(adapter); in vmxnet3_quiesce_dev()
2512 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) in vmxnet3_write_mac_addr() argument
2517 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); in vmxnet3_write_mac_addr()
2520 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); in vmxnet3_write_mac_addr()
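
The MAC address is split across two BAR1 registers: the first four bytes go to MACL as one little-endian u32, the last two to MACH (the vmxnet3_read_mac_addr() fragments at 2870-2873 mirror this in reverse). Sketch:

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
        u32 tmp;

        tmp = *(u32 *)mac;                      /* bytes 0-3, little-endian */
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

        tmp = (mac[5] << 8) | mac[4];           /* bytes 4-5 */
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
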
2528 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_set_mac_addr() local
2531 vmxnet3_write_mac_addr(adapter, addr->sa_data); in vmxnet3_set_mac_addr()
2540 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) in vmxnet3_alloc_pci_resources() argument
2544 struct pci_dev *pdev = adapter->pdev; in vmxnet3_alloc_pci_resources()
2582 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
2583 if (!adapter->hw_addr0) { in vmxnet3_alloc_pci_resources()
2591 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
2592 if (!adapter->hw_addr1) { in vmxnet3_alloc_pci_resources()
2600 iounmap(adapter->hw_addr0); in vmxnet3_alloc_pci_resources()
2610 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) in vmxnet3_free_pci_resources() argument
2612 BUG_ON(!adapter->pdev); in vmxnet3_free_pci_resources()
2614 iounmap(adapter->hw_addr0); in vmxnet3_free_pci_resources()
2615 iounmap(adapter->hw_addr1); in vmxnet3_free_pci_resources()
2616 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); in vmxnet3_free_pci_resources()
2617 pci_disable_device(adapter->pdev); in vmxnet3_free_pci_resources()
2622 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) in vmxnet3_adjust_rx_ring_size() argument
2625 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; in vmxnet3_adjust_rx_ring_size()
2628 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - in vmxnet3_adjust_rx_ring_size()
2630 adapter->skb_buf_size = adapter->netdev->mtu + in vmxnet3_adjust_rx_ring_size()
2632 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) in vmxnet3_adjust_rx_ring_size()
2633 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
2635 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
2637 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
2638 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + in vmxnet3_adjust_rx_ring_size()
2640 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; in vmxnet3_adjust_rx_ring_size()
2647 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; in vmxnet3_adjust_rx_ring_size()
2648 ring0_size = adapter->rx_queue[0].rx_ring[0].size; in vmxnet3_adjust_rx_ring_size()
2652 ring1_size = adapter->rx_queue[0].rx_ring[1].size; in vmxnet3_adjust_rx_ring_size()
2658 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_adjust_rx_ring_size()
2659 rq = &adapter->rx_queue[i]; in vmxnet3_adjust_rx_ring_size()
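
The buffer-sizing logic at 2628-2640 works out how many rx descriptors one packet consumes: if the MTU plus ethernet header room fits in a single skb buffer, one buffer per packet suffices; otherwise the first buffer is capped at VMXNET3_MAX_SKB_BUF_SIZE and the remainder is spread over page-sized buffers. Ring sizes are then kept at a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN. The core arithmetic, condensed (the VMXNET3_MAX_ETH_HDR_SIZE constant is an assumption; its name is cut off in the fragments):

/* condensed from vmxnet3_adjust_rx_ring_size() */
size_t sz;

if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
                            VMXNET3_MAX_ETH_HDR_SIZE) {
        /* whole packet fits in one skb buffer */
        adapter->skb_buf_size = adapter->netdev->mtu +
                                VMXNET3_MAX_ETH_HDR_SIZE;
        if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
                adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
        adapter->rx_buf_per_pkt = 1;
} else {
        /* first buffer maxed out; the rest lands in page-sized buffers */
        adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
        sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
                                    VMXNET3_MAX_ETH_HDR_SIZE;
        adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
}
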
2668 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, in vmxnet3_create_queues() argument
2673 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_create_queues()
2674 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues()
2678 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
2680 tq->adapter = adapter; in vmxnet3_create_queues()
2682 err = vmxnet3_tq_create(tq, adapter); in vmxnet3_create_queues()
2691 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; in vmxnet3_create_queues()
2692 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; in vmxnet3_create_queues()
2693 vmxnet3_adjust_rx_ring_size(adapter); in vmxnet3_create_queues()
2694 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_create_queues()
2695 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_create_queues()
2698 rq->shared = &adapter->rqd_start[i].ctrl; in vmxnet3_create_queues()
2699 rq->adapter = adapter; in vmxnet3_create_queues()
2700 err = vmxnet3_rq_create(rq, adapter); in vmxnet3_create_queues()
2703 netdev_err(adapter->netdev, in vmxnet3_create_queues()
2708 netdev_info(adapter->netdev, in vmxnet3_create_queues()
2711 adapter->num_rx_queues = i; in vmxnet3_create_queues()
2719 vmxnet3_tq_destroy_all(adapter); in vmxnet3_create_queues()
2726 struct vmxnet3_adapter *adapter; in vmxnet3_open() local
2729 adapter = netdev_priv(netdev); in vmxnet3_open()
2731 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_open()
2732 spin_lock_init(&adapter->tx_queue[i].tx_lock); in vmxnet3_open()
2734 err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, in vmxnet3_open()
2735 adapter->rx_ring_size, in vmxnet3_open()
2736 adapter->rx_ring2_size); in vmxnet3_open()
2740 err = vmxnet3_activate_dev(adapter); in vmxnet3_open()
2747 vmxnet3_rq_destroy_all(adapter); in vmxnet3_open()
2748 vmxnet3_tq_destroy_all(adapter); in vmxnet3_open()
2757 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_close() local
2763 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_close()
2766 vmxnet3_quiesce_dev(adapter); in vmxnet3_close()
2768 vmxnet3_rq_destroy_all(adapter); in vmxnet3_close()
2769 vmxnet3_tq_destroy_all(adapter); in vmxnet3_close()
2771 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_close()
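
open and close are symmetric around the activate/quiesce pair: open initializes the per-queue tx locks, creates the queues with the configured ring sizes, and activates; close takes the RESETTING bit (spinning while the reset worker holds it), quiesces, and destroys the queues. A sketch of the close side (the exact back-off inside the wait loop is an assumption):

static int
vmxnet3_close(struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        /* serialize against the reset worker */
        while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
                msleep(1);      /* assumed back-off; body not in the listing */

        vmxnet3_quiesce_dev(adapter);

        vmxnet3_rq_destroy_all(adapter);
        vmxnet3_tq_destroy_all(adapter);

        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

        return 0;
}
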
2779 vmxnet3_force_close(struct vmxnet3_adapter *adapter) in vmxnet3_force_close() argument
2787 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); in vmxnet3_force_close()
2790 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_force_close()
2791 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_force_close()
2792 dev_close(adapter->netdev); in vmxnet3_force_close()
2799 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_change_mtu() local
2811 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_change_mtu()
2815 vmxnet3_quiesce_dev(adapter); in vmxnet3_change_mtu()
2816 vmxnet3_reset_dev(adapter); in vmxnet3_change_mtu()
2819 vmxnet3_rq_destroy_all(adapter); in vmxnet3_change_mtu()
2820 vmxnet3_adjust_rx_ring_size(adapter); in vmxnet3_change_mtu()
2821 err = vmxnet3_rq_create_all(adapter); in vmxnet3_change_mtu()
2829 err = vmxnet3_activate_dev(adapter); in vmxnet3_change_mtu()
2839 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_change_mtu()
2841 vmxnet3_force_close(adapter); in vmxnet3_change_mtu()
2848 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) in vmxnet3_declare_features() argument
2850 struct net_device *netdev = adapter->netdev; in vmxnet3_declare_features()
2866 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) in vmxnet3_read_mac_addr() argument
2870 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); in vmxnet3_read_mac_addr()
2873 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); in vmxnet3_read_mac_addr()
2890 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec) in vmxnet3_acquire_msix_vectors() argument
2892 int ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
2893 adapter->intr.msix_entries, nvec, nvec); in vmxnet3_acquire_msix_vectors()
2896 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
2900 ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
2901 adapter->intr.msix_entries, in vmxnet3_acquire_msix_vectors()
2907 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
2918 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) in vmxnet3_alloc_intr_resources() argument
2924 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
2925 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_alloc_intr_resources()
2927 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_alloc_intr_resources()
2928 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
2929 adapter->intr.type = cfg & 0x3; in vmxnet3_alloc_intr_resources()
2930 adapter->intr.mask_mode = (cfg >> 2) & 0x3; in vmxnet3_alloc_intr_resources()
2932 if (adapter->intr.type == VMXNET3_IT_AUTO) { in vmxnet3_alloc_intr_resources()
2933 adapter->intr.type = VMXNET3_IT_MSIX; in vmxnet3_alloc_intr_resources()
2937 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_alloc_intr_resources()
2940 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? in vmxnet3_alloc_intr_resources()
2941 1 : adapter->num_tx_queues; in vmxnet3_alloc_intr_resources()
2942 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? in vmxnet3_alloc_intr_resources()
2943 0 : adapter->num_rx_queues; in vmxnet3_alloc_intr_resources()
2949 adapter->intr.msix_entries[i].entry = i; in vmxnet3_alloc_intr_resources()
2951 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); in vmxnet3_alloc_intr_resources()
2959 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE in vmxnet3_alloc_intr_resources()
2960 || adapter->num_rx_queues != 1) { in vmxnet3_alloc_intr_resources()
2961 adapter->share_intr = VMXNET3_INTR_TXSHARE; in vmxnet3_alloc_intr_resources()
2962 netdev_err(adapter->netdev, in vmxnet3_alloc_intr_resources()
2964 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
2968 adapter->intr.num_intrs = nvec; in vmxnet3_alloc_intr_resources()
2973 dev_info(&adapter->pdev->dev, in vmxnet3_alloc_intr_resources()
2977 adapter->intr.type = VMXNET3_IT_MSI; in vmxnet3_alloc_intr_resources()
2980 if (adapter->intr.type == VMXNET3_IT_MSI) { in vmxnet3_alloc_intr_resources()
2981 if (!pci_enable_msi(adapter->pdev)) { in vmxnet3_alloc_intr_resources()
2982 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
2983 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
2989 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
2990 dev_info(&adapter->netdev->dev, in vmxnet3_alloc_intr_resources()
2992 adapter->intr.type = VMXNET3_IT_INTX; in vmxnet3_alloc_intr_resources()
2995 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
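
Interrupt setup first queries the device's preference (the cfg word read back at 2927: low two bits give the type, the next two the mask mode), then walks a fallback ladder MSI-X, then MSI, then INTx, shrinking num_rx_queues to 1 whenever per-queue vectors are unavailable. The ladder, condensed (the "+1 for the event interrupt" is an assumption; the fragment at 2943 is cut off):

/* condensed from vmxnet3_alloc_intr_resources() */
if (adapter->intr.type == VMXNET3_IT_AUTO)
        adapter->intr.type = VMXNET3_IT_MSIX;

if (adapter->intr.type == VMXNET3_IT_MSIX) {
        int nvec;

        nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
                1 : adapter->num_tx_queues;
        nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
                0 : adapter->num_rx_queues;
        nvec += 1;                              /* assumed: event interrupt */

        nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
        if (nvec > 0) {
                adapter->intr.num_intrs = nvec;
                return;
        }
        adapter->intr.type = VMXNET3_IT_MSI;    /* MSI-X failed, fall back */
}

if (adapter->intr.type == VMXNET3_IT_MSI &&
    !pci_enable_msi(adapter->pdev)) {
        adapter->num_rx_queues = 1;
        adapter->intr.num_intrs = 1;
        return;
}

adapter->num_rx_queues = 1;                     /* final fallback: INTx */
adapter->intr.type = VMXNET3_IT_INTX;
adapter->intr.num_intrs = 1;
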
3000 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) in vmxnet3_free_intr_resources() argument
3002 if (adapter->intr.type == VMXNET3_IT_MSIX) in vmxnet3_free_intr_resources()
3003 pci_disable_msix(adapter->pdev); in vmxnet3_free_intr_resources()
3004 else if (adapter->intr.type == VMXNET3_IT_MSI) in vmxnet3_free_intr_resources()
3005 pci_disable_msi(adapter->pdev); in vmxnet3_free_intr_resources()
3007 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); in vmxnet3_free_intr_resources()
3014 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_tx_timeout() local
3015 adapter->tx_timeout_count++; in vmxnet3_tx_timeout()
3017 netdev_err(adapter->netdev, "tx hang\n"); in vmxnet3_tx_timeout()
3018 schedule_work(&adapter->work); in vmxnet3_tx_timeout()
3019 netif_wake_queue(adapter->netdev); in vmxnet3_tx_timeout()
3026 struct vmxnet3_adapter *adapter; in vmxnet3_reset_work() local
3028 adapter = container_of(data, struct vmxnet3_adapter, work); in vmxnet3_reset_work()
3031 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_reset_work()
3036 if (netif_running(adapter->netdev)) { in vmxnet3_reset_work()
3037 netdev_notice(adapter->netdev, "resetting\n"); in vmxnet3_reset_work()
3038 vmxnet3_quiesce_dev(adapter); in vmxnet3_reset_work()
3039 vmxnet3_reset_dev(adapter); in vmxnet3_reset_work()
3040 vmxnet3_activate_dev(adapter); in vmxnet3_reset_work()
3042 netdev_info(adapter->netdev, "already closed\n"); in vmxnet3_reset_work()
3046 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_reset_work()
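
The reset worker ties the pieces together: it claims the RESETTING bit (bailing out if another reset is in flight), and if the device is running it quiesces, resets via VMXNET3_CMD_RESET_DEV, and re-activates. Sketch (the locking around netif_running() is an assumption; it is not shown in the fragments):

static void
vmxnet3_reset_work(struct work_struct *data)
{
        struct vmxnet3_adapter *adapter =
                container_of(data, struct vmxnet3_adapter, work);

        /* another reset already in progress: nothing to do */
        if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
                return;

        rtnl_lock();                    /* assumed: stabilize netif_running() */
        if (netif_running(adapter->netdev)) {
                netdev_notice(adapter->netdev, "resetting\n");
                vmxnet3_quiesce_dev(adapter);
                vmxnet3_reset_dev(adapter);
                vmxnet3_activate_dev(adapter);
        } else {
                netdev_info(adapter->netdev, "already closed\n");
        }
        rtnl_unlock();

        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
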
3074 struct vmxnet3_adapter *adapter; in vmxnet3_probe_device() local
3109 adapter = netdev_priv(netdev); in vmxnet3_probe_device()
3110 adapter->netdev = netdev; in vmxnet3_probe_device()
3111 adapter->pdev = pdev; in vmxnet3_probe_device()
3113 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; in vmxnet3_probe_device()
3114 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; in vmxnet3_probe_device()
3115 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; in vmxnet3_probe_device()
3117 spin_lock_init(&adapter->cmd_lock); in vmxnet3_probe_device()
3118 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, in vmxnet3_probe_device()
3121 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { in vmxnet3_probe_device()
3126 adapter->shared = dma_alloc_coherent( in vmxnet3_probe_device()
3127 &adapter->pdev->dev, in vmxnet3_probe_device()
3129 &adapter->shared_pa, GFP_KERNEL); in vmxnet3_probe_device()
3130 if (!adapter->shared) { in vmxnet3_probe_device()
3136 adapter->num_rx_queues = num_rx_queues; in vmxnet3_probe_device()
3137 adapter->num_tx_queues = num_tx_queues; in vmxnet3_probe_device()
3138 adapter->rx_buf_per_pkt = 1; in vmxnet3_probe_device()
3140 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_probe_device()
3141 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; in vmxnet3_probe_device()
3142 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, in vmxnet3_probe_device()
3143 &adapter->queue_desc_pa, in vmxnet3_probe_device()
3146 if (!adapter->tqd_start) { in vmxnet3_probe_device()
3151 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + in vmxnet3_probe_device()
3152 adapter->num_tx_queues); in vmxnet3_probe_device()
3154 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
3156 &adapter->pm_conf_pa, in vmxnet3_probe_device()
3158 if (adapter->pm_conf == NULL) { in vmxnet3_probe_device()
3165 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
3167 &adapter->rss_conf_pa, in vmxnet3_probe_device()
3169 if (adapter->rss_conf == NULL) { in vmxnet3_probe_device()
3175 err = vmxnet3_alloc_pci_resources(adapter, &dma64); in vmxnet3_probe_device()
3179 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); in vmxnet3_probe_device()
3181 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2); in vmxnet3_probe_device()
3182 adapter->version = 2; in vmxnet3_probe_device()
3184 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); in vmxnet3_probe_device()
3185 adapter->version = 1; in vmxnet3_probe_device()
3192 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version); in vmxnet3_probe_device()
3194 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); in vmxnet3_probe_device()
3196 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); in vmxnet3_probe_device()
3205 vmxnet3_declare_features(adapter, dma64); in vmxnet3_probe_device()
3207 if (adapter->num_tx_queues == adapter->num_rx_queues) in vmxnet3_probe_device()
3208 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; in vmxnet3_probe_device()
3210 adapter->share_intr = VMXNET3_INTR_DONTSHARE; in vmxnet3_probe_device()
3212 vmxnet3_alloc_intr_resources(adapter); in vmxnet3_probe_device()
3215 if (adapter->num_rx_queues > 1 && in vmxnet3_probe_device()
3216 adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
3217 adapter->rss = true; in vmxnet3_probe_device()
3222 adapter->rss = false; in vmxnet3_probe_device()
3226 vmxnet3_read_mac_addr(adapter, mac); in vmxnet3_probe_device()
3233 INIT_WORK(&adapter->work, vmxnet3_reset_work); in vmxnet3_probe_device()
3234 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_probe_device()
3236 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
3238 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_probe_device()
3239 netif_napi_add(adapter->netdev, in vmxnet3_probe_device()
3240 &adapter->rx_queue[i].napi, in vmxnet3_probe_device()
3244 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, in vmxnet3_probe_device()
3248 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); in vmxnet3_probe_device()
3249 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); in vmxnet3_probe_device()
3259 vmxnet3_check_link(adapter, false); in vmxnet3_probe_device()
3263 vmxnet3_free_intr_resources(adapter); in vmxnet3_probe_device()
3265 vmxnet3_free_pci_resources(adapter); in vmxnet3_probe_device()
3268 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_probe_device()
3269 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_probe_device()
3272 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_probe_device()
3273 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_probe_device()
3275 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_probe_device()
3276 adapter->queue_desc_pa); in vmxnet3_probe_device()
3278 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
3280 adapter->shared, adapter->shared_pa); in vmxnet3_probe_device()
3282 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_probe_device()
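
The version-negotiation fragments at 3179-3196 above show how probe agrees on revisions through a pair of registers: VRRS advertises a bitmask of device revisions and the driver writes back the single highest one it also implements (2, else 1); UVRS does the same for the UPT protocol. A sketch of that exchange (the error paths are assumptions):

u32 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
if (ver & 2) {
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
        adapter->version = 2;
} else if (ver & 1) {
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
        adapter->version = 1;
} else {
        goto err_ver;           /* assumed: no common revision, abort probe */
}

ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
if (ver & 1)
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
else
        goto err_ver;           /* assumed */
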
3294 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_remove_device() local
3307 cancel_work_sync(&adapter->work); in vmxnet3_remove_device()
3311 vmxnet3_free_intr_resources(adapter); in vmxnet3_remove_device()
3312 vmxnet3_free_pci_resources(adapter); in vmxnet3_remove_device()
3314 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_remove_device()
3315 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_remove_device()
3317 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_remove_device()
3318 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_remove_device()
3320 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_remove_device()
3322 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_remove_device()
3323 adapter->queue_desc_pa); in vmxnet3_remove_device()
3324 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
3326 adapter->shared, adapter->shared_pa); in vmxnet3_remove_device()
3327 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_remove_device()
3335 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_shutdown_device() local
3341 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_shutdown_device()
3345 &adapter->state)) { in vmxnet3_shutdown_device()
3346 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
3349 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
3350 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_shutdown_device()
3352 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
3353 vmxnet3_disable_all_intrs(adapter); in vmxnet3_shutdown_device()
3355 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
3366 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_suspend() local
3379 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_suspend()
3380 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_suspend()
3382 vmxnet3_disable_all_intrs(adapter); in vmxnet3_suspend()
3383 vmxnet3_free_irqs(adapter); in vmxnet3_suspend()
3384 vmxnet3_free_intr_resources(adapter); in vmxnet3_suspend()
3390 pmConf = adapter->pm_conf; in vmxnet3_suspend()
3393 if (adapter->wol & WAKE_UCAST) { in vmxnet3_suspend()
3403 if (adapter->wol & WAKE_ARP) { in vmxnet3_suspend()
3446 if (adapter->wol & WAKE_MAGIC) in vmxnet3_suspend()
3451 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); in vmxnet3_suspend()
3452 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( in vmxnet3_suspend()
3454 adapter->shared->devRead.pmConfDesc.confPA = in vmxnet3_suspend()
3455 cpu_to_le64(adapter->pm_conf_pa); in vmxnet3_suspend()
3457 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_suspend()
3458 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_suspend()
3460 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_suspend()
3464 adapter->wol); in vmxnet3_suspend()
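
The suspend fragments at 3390-3464 build a wake-on-LAN filter set in pm_conf (unicast and ARP filters per adapter->wol, plus a magic-packet flag), publish it through the pmConfDesc descriptor in the shared area, and kick the device before arming PCI wake-up. The descriptor handoff, condensed (the command constant is elided at 3458; VMXNET3_CMD_UPDATE_PMCFG is the driver's name for it, and the D3hot state is an assumption):

/* condensed from vmxnet3_suspend(): publish pm_conf to the device */
struct pci_dev *pdev = adapter->pdev;
unsigned long flags;

adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen =
        cpu_to_le32(sizeof(struct Vmxnet3_PMConf));
adapter->shared->devRead.pmConfDesc.confPA =
        cpu_to_le64(adapter->pm_conf_pa);

spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                       VMXNET3_CMD_UPDATE_PMCFG);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);

pci_enable_wake(pdev, PCI_D3hot, adapter->wol);   /* assumed PM state */
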
3479 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_resume() local
3492 vmxnet3_alloc_intr_resources(adapter); in vmxnet3_resume()
3501 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_resume()
3502 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_resume()
3504 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_resume()
3505 vmxnet3_tq_cleanup_all(adapter); in vmxnet3_resume()
3506 vmxnet3_rq_cleanup_all(adapter); in vmxnet3_resume()
3508 vmxnet3_reset_dev(adapter); in vmxnet3_resume()
3509 err = vmxnet3_activate_dev(adapter); in vmxnet3_resume()
3513 vmxnet3_force_close(adapter); in vmxnet3_resume()