Lines Matching refs:priv

161 #define PRIV_TO_DEV(priv) (&(priv)->dev->dev)  argument
163 static void ec_bhf_reset(struct ec_bhf_priv *priv) in ec_bhf_reset() argument
165 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); in ec_bhf_reset()
166 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); in ec_bhf_reset()
167 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); in ec_bhf_reset()
168 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); in ec_bhf_reset()
169 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); in ec_bhf_reset()
170 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); in ec_bhf_reset()
171 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); in ec_bhf_reset()
173 iowrite8(0, priv->fifo_io + FIFO_TX_RESET); in ec_bhf_reset()
174 iowrite8(0, priv->fifo_io + FIFO_RX_RESET); in ec_bhf_reset()
176 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); in ec_bhf_reset()
179 static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) in ec_bhf_send_packet() argument
182 u32 addr = (u8 *)desc - priv->tx_buf.buf; in ec_bhf_send_packet()
184 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); in ec_bhf_send_packet()
192 static void ec_bhf_process_tx(struct ec_bhf_priv *priv) in ec_bhf_process_tx() argument
194 if (unlikely(netif_queue_stopped(priv->net_dev))) { in ec_bhf_process_tx()
198 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) in ec_bhf_process_tx()
199 netif_wake_queue(priv->net_dev); in ec_bhf_process_tx()
208 static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) in ec_bhf_add_rx_desc() argument
210 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), in ec_bhf_add_rx_desc()
211 priv->fifo_io + FIFO_RX_REG); in ec_bhf_add_rx_desc()
214 static void ec_bhf_process_rx(struct ec_bhf_priv *priv) in ec_bhf_process_rx() argument
216 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; in ec_bhf_process_rx()
224 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); in ec_bhf_process_rx()
227 skb->protocol = eth_type_trans(skb, priv->net_dev); in ec_bhf_process_rx()
228 priv->stat_rx_bytes += pkt_size; in ec_bhf_process_rx()
232 dev_err_ratelimited(PRIV_TO_DEV(priv), in ec_bhf_process_rx()
239 ec_bhf_add_rx_desc(priv, desc); in ec_bhf_process_rx()
241 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; in ec_bhf_process_rx()
242 desc = &priv->rx_descs[priv->rx_dnext]; in ec_bhf_process_rx()
248 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, in ec_bhf_timer_fun() local
250 ec_bhf_process_rx(priv); in ec_bhf_timer_fun()
251 ec_bhf_process_tx(priv); in ec_bhf_timer_fun()
253 if (!netif_running(priv->net_dev)) in ec_bhf_timer_fun()
260 static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) in ec_bhf_setup_offsets() argument
262 struct device *dev = PRIV_TO_DEV(priv); in ec_bhf_setup_offsets()
266 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); in ec_bhf_setup_offsets()
268 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + in ec_bhf_setup_offsets()
278 ec_info = priv->io + i * INFO_BLOCK_SIZE; in ec_bhf_setup_offsets()
280 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); in ec_bhf_setup_offsets()
281 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); in ec_bhf_setup_offsets()
283 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); in ec_bhf_setup_offsets()
284 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); in ec_bhf_setup_offsets()
285 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); in ec_bhf_setup_offsets()
286 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); in ec_bhf_setup_offsets()
294 struct ec_bhf_priv *priv = netdev_priv(net_dev); in ec_bhf_start_xmit() local
298 desc = &priv->tx_descs[priv->tx_dnext]; in ec_bhf_start_xmit()
307 ec_bhf_send_packet(priv, desc); in ec_bhf_start_xmit()
309 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; in ec_bhf_start_xmit()
311 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { in ec_bhf_start_xmit()
320 priv->stat_tx_bytes += len; in ec_bhf_start_xmit()
327 static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, in ec_bhf_alloc_dma_mem() argument
333 struct device *dev = PRIV_TO_DEV(priv); in ec_bhf_alloc_dma_mem()
336 iowrite32(0xffffffff, priv->dma_io + offset); in ec_bhf_alloc_dma_mem()
338 mask = ioread32(priv->dma_io + offset); in ec_bhf_alloc_dma_mem()
360 iowrite32(0, priv->dma_io + offset + 4); in ec_bhf_alloc_dma_mem()
361 iowrite32(buf->buf_phys, priv->dma_io + offset); in ec_bhf_alloc_dma_mem()
366 static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) in ec_bhf_setup_tx_descs() argument
370 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); in ec_bhf_setup_tx_descs()
371 priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf; in ec_bhf_setup_tx_descs()
372 priv->tx_dnext = 0; in ec_bhf_setup_tx_descs()
374 for (i = 0; i < priv->tx_dcount; i++) in ec_bhf_setup_tx_descs()
375 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); in ec_bhf_setup_tx_descs()
378 static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) in ec_bhf_setup_rx_descs() argument
382 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); in ec_bhf_setup_rx_descs()
383 priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf; in ec_bhf_setup_rx_descs()
384 priv->rx_dnext = 0; in ec_bhf_setup_rx_descs()
386 for (i = 0; i < priv->rx_dcount; i++) { in ec_bhf_setup_rx_descs()
387 struct rx_desc *desc = &priv->rx_descs[i]; in ec_bhf_setup_rx_descs()
390 if (i != priv->rx_dcount - 1) in ec_bhf_setup_rx_descs()
391 next = (u8 *)(desc + 1) - priv->rx_buf.buf; in ec_bhf_setup_rx_descs()
397 ec_bhf_add_rx_desc(priv, desc); in ec_bhf_setup_rx_descs()
403 struct ec_bhf_priv *priv = netdev_priv(net_dev); in ec_bhf_open() local
404 struct device *dev = PRIV_TO_DEV(priv); in ec_bhf_open()
407 ec_bhf_reset(priv); in ec_bhf_open()
409 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, in ec_bhf_open()
415 ec_bhf_setup_rx_descs(priv); in ec_bhf_open()
417 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, in ec_bhf_open()
423 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); in ec_bhf_open()
424 ec_bhf_setup_tx_descs(priv); in ec_bhf_open()
428 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in ec_bhf_open()
429 priv->hrtimer.function = ec_bhf_timer_fun; in ec_bhf_open()
430 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), in ec_bhf_open()
436 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, in ec_bhf_open()
437 priv->rx_buf.alloc_phys); in ec_bhf_open()
444 struct ec_bhf_priv *priv = netdev_priv(net_dev); in ec_bhf_stop() local
445 struct device *dev = PRIV_TO_DEV(priv); in ec_bhf_stop()
447 hrtimer_cancel(&priv->hrtimer); in ec_bhf_stop()
449 ec_bhf_reset(priv); in ec_bhf_stop()
453 dma_free_coherent(dev, priv->tx_buf.alloc_len, in ec_bhf_stop()
454 priv->tx_buf.alloc, priv->tx_buf.alloc_phys); in ec_bhf_stop()
455 dma_free_coherent(dev, priv->rx_buf.alloc_len, in ec_bhf_stop()
456 priv->rx_buf.alloc, priv->rx_buf.alloc_phys); in ec_bhf_stop()
465 struct ec_bhf_priv *priv = netdev_priv(net_dev); in ec_bhf_get_stats() local
467 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + in ec_bhf_get_stats()
468 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + in ec_bhf_get_stats()
469 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); in ec_bhf_get_stats()
470 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); in ec_bhf_get_stats()
471 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); in ec_bhf_get_stats()
472 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); in ec_bhf_get_stats()
474 stats->tx_bytes = priv->stat_tx_bytes; in ec_bhf_get_stats()
475 stats->rx_bytes = priv->stat_rx_bytes; in ec_bhf_get_stats()
493 struct ec_bhf_priv *priv; in ec_bhf_probe() local
553 priv = netdev_priv(net_dev); in ec_bhf_probe()
554 priv->net_dev = net_dev; in ec_bhf_probe()
555 priv->io = io; in ec_bhf_probe()
556 priv->dma_io = dma_io; in ec_bhf_probe()
557 priv->dev = dev; in ec_bhf_probe()
559 err = ec_bhf_setup_offsets(priv); in ec_bhf_probe()
563 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); in ec_bhf_probe()
589 struct ec_bhf_priv *priv = netdev_priv(net_dev); in ec_bhf_remove() local
594 pci_iounmap(dev, priv->dma_io); in ec_bhf_remove()
595 pci_iounmap(dev, priv->io); in ec_bhf_remove()
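
Note on the ring indexing visible above: ec_bhf_process_rx() and ec_bhf_start_xmit() both advance their descriptor index with a modulo wrap (priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount, and the tx_dnext equivalent). A minimal user-space C sketch of that wrap, using hypothetical names (ring_next, RING_SIZE) that are not part of the driver:

#include <stdio.h>

/* Hypothetical descriptor count; ec_bhf derives the real count from
 * the DMA buffer length divided by the descriptor size. */
#define RING_SIZE 8

/* Advance one slot and wrap back to 0 after the last descriptor,
 * mirroring how rx_dnext/tx_dnext are stepped in the listing above. */
static unsigned int ring_next(unsigned int idx)
{
	return (idx + 1) % RING_SIZE;
}

int main(void)
{
	unsigned int idx = 0;
	int i;

	/* Stepping RING_SIZE + 2 times shows the wrap back to slot 0. */
	for (i = 0; i < RING_SIZE + 2; i++) {
		printf("%u ", idx);
		idx = ring_next(idx);
	}
	printf("\n");
	return 0;
}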