Lines matching references to "p" in the octeon_mgmt driver (octeon_mgmt.c). Each entry gives the source line number, the matched line, and the enclosing function; the trailing "argument"/"local" notes whether "p" is that function's parameter or a local variable. Lines that do not reference "p" are elided by the search, so most function bodies below are incomplete.
159 static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) in octeon_mgmt_set_rx_irq() argument
164 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_rx_irq()
165 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); in octeon_mgmt_set_rx_irq()
167 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_set_rx_irq()
168 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_rx_irq()
171 static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) in octeon_mgmt_set_tx_irq() argument
176 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_tx_irq()
177 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); in octeon_mgmt_set_tx_irq()
179 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_set_tx_irq()
180 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_tx_irq()
183 static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) in octeon_mgmt_enable_rx_irq() argument
185 octeon_mgmt_set_rx_irq(p, 1); in octeon_mgmt_enable_rx_irq()
188 static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) in octeon_mgmt_disable_rx_irq() argument
190 octeon_mgmt_set_rx_irq(p, 0); in octeon_mgmt_disable_rx_irq()
193 static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) in octeon_mgmt_enable_tx_irq() argument
195 octeon_mgmt_set_tx_irq(p, 1); in octeon_mgmt_enable_tx_irq()
198 static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) in octeon_mgmt_disable_tx_irq() argument
200 octeon_mgmt_set_tx_irq(p, 0); in octeon_mgmt_disable_tx_irq()
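The four one-line wrappers above (lines 183-200) funnel into the two maskers at lines 159-180, which flip the MIX threshold-interrupt enables under p->lock. A hedged reconstruction of octeon_mgmt_set_rx_irq() with the elided declarations and the field assignment filled in; the ithena/othena field names follow the cvmx MIX_INTENA layout as I recall it and are an assumption, not shown in the listing:

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.ithena = enable ? 1 : 0; /* assumed field: RX (input) threshold enable */
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

octeon_mgmt_set_tx_irq() (lines 171-180) is the same shape, setting the TX-side othena bit instead.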
215 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_rx_fill_ring() local
217 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { in octeon_mgmt_rx_fill_ring()
229 __skb_queue_tail(&p->rx_list, skb); in octeon_mgmt_rx_fill_ring()
233 re.s.addr = dma_map_single(p->dev, skb->data, in octeon_mgmt_rx_fill_ring()
238 p->rx_ring[p->rx_next_fill] = re.d64; in octeon_mgmt_rx_fill_ring()
239 dma_sync_single_for_device(p->dev, p->rx_ring_handle, in octeon_mgmt_rx_fill_ring()
242 p->rx_next_fill = in octeon_mgmt_rx_fill_ring()
243 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; in octeon_mgmt_rx_fill_ring()
244 p->rx_current_fill++; in octeon_mgmt_rx_fill_ring()
246 cvmx_write_csr(p->mix + MIX_IRING2, 1); in octeon_mgmt_rx_fill_ring()
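octeon_mgmt_rx_fill_ring() (lines 215-246) allocates one skb per free RX descriptor, maps it, publishes the ring entry, and rings the MIX_IRING2 doorbell. A hedged sketch of the loop with the elided allocation lines filled in; the buffer sizing (OCTEON_MGMT_RX_HEADROOM, the 8-byte pad) follows my reading of the driver and is an assumption:

while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        /* Assumed sizing: MTU + headroom + 8-byte pad + IP alignment. */
        unsigned int size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

        skb = netdev_alloc_skb(netdev, size);
        if (!skb)
                break;
        skb_reserve(skb, NET_IP_ALIGN);
        __skb_queue_tail(&p->rx_list, skb);

        re.d64 = 0;
        re.s.len = size;
        re.s.addr = dma_map_single(p->dev, skb->data, size, DMA_FROM_DEVICE);

        /* Publish the entry, then sync the ring for the device. */
        p->rx_ring[p->rx_next_fill] = re.d64;
        dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                   DMA_BIDIRECTIONAL);
        p->rx_next_fill = (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill++;
        /* Doorbell: tell the MIX one more entry is ready. */
        cvmx_write_csr(p->mix + MIX_IRING2, 1);
}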
250 static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) in octeon_mgmt_clean_tx_buffers() argument
258 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
260 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
262 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
265 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
269 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, in octeon_mgmt_clean_tx_buffers()
273 re.d64 = p->tx_ring[p->tx_next_clean]; in octeon_mgmt_clean_tx_buffers()
274 p->tx_next_clean = in octeon_mgmt_clean_tx_buffers()
275 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; in octeon_mgmt_clean_tx_buffers()
276 skb = __skb_dequeue(&p->tx_list); in octeon_mgmt_clean_tx_buffers()
282 cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); in octeon_mgmt_clean_tx_buffers()
283 p->tx_current_fill--; in octeon_mgmt_clean_tx_buffers()
285 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
287 dma_unmap_single(p->dev, re.s.addr, re.s.len, in octeon_mgmt_clean_tx_buffers()
297 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); in octeon_mgmt_clean_tx_buffers()
299 cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); in octeon_mgmt_clean_tx_buffers()
308 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
311 if (cleaned && netif_queue_stopped(p->netdev)) in octeon_mgmt_clean_tx_buffers()
312 netif_wake_queue(p->netdev); in octeon_mgmt_clean_tx_buffers()
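octeon_mgmt_clean_tx_buffers() (lines 250-312) reaps completed TX descriptors: while MIX_ORCNT is nonzero it pops one ring entry and its skb under tx_list.lock, acknowledges one packet back to the hardware, unmaps outside the lock, and finally wakes the queue if it had been stopped. A condensed, hedged sketch; the PTP TX-timestamp branch around lines 297-299 is elided, and the orcnt field name is an assumption:

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        while (mix_orcnt.s.orcnt) {
                spin_lock_irqsave(&p->tx_list.lock, flags);

                /* Re-check under the lock. */
                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
                if (mix_orcnt.s.orcnt == 0) {
                        spin_unlock_irqrestore(&p->tx_list.lock, flags);
                        break;
                }

                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                /* Acknowledge one consumed packet to the hardware. */
                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;
                cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                /* PTP TX timestamp handling elided (CVMX_MIXX_TSTAMP/TSCTL, lines 297-299). */

                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}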
317 struct octeon_mgmt *p = (struct octeon_mgmt *)arg; in octeon_mgmt_clean_tx_tasklet() local
318 octeon_mgmt_clean_tx_buffers(p); in octeon_mgmt_clean_tx_tasklet()
319 octeon_mgmt_enable_tx_irq(p); in octeon_mgmt_clean_tx_tasklet()
324 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_update_rx_stats() local
329 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); in octeon_mgmt_update_rx_stats()
330 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); in octeon_mgmt_update_rx_stats()
334 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_update_rx_stats()
337 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_update_rx_stats()
343 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_update_tx_stats() local
350 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); in octeon_mgmt_update_tx_stats()
351 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); in octeon_mgmt_update_tx_stats()
355 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_update_tx_stats()
358 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_update_tx_stats()
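The two stats updaters (lines 324-337 and 343-358) read hardware counters and fold them into netdev->stats under p->lock. A hedged reconstruction of the RX side; that the CSR reads clear the hardware counters, and the exact stats fields used, are assumptions:

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;
        u64 drop, bad;

        /* Assumed: these reads also clear the hardware counters. */
        drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
        bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

        if (drop || bad) {
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

The TX side (lines 343-358) does the same with AGL_GMX_TX_STAT0/STAT1, accumulating collision and error counts.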
366 static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, in octeon_mgmt_dequeue_rx_buffer() argument
371 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, in octeon_mgmt_dequeue_rx_buffer()
375 re.d64 = p->rx_ring[p->rx_next]; in octeon_mgmt_dequeue_rx_buffer()
376 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; in octeon_mgmt_dequeue_rx_buffer()
377 p->rx_current_fill--; in octeon_mgmt_dequeue_rx_buffer()
378 *pskb = __skb_dequeue(&p->rx_list); in octeon_mgmt_dequeue_rx_buffer()
380 dma_unmap_single(p->dev, re.s.addr, in octeon_mgmt_dequeue_rx_buffer()
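A hedged reconstruction of octeon_mgmt_dequeue_rx_buffer() (lines 366-380), which pops the next RX ring entry and its matching skb; the unmap length expression is my assumption:

static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;

        /* The skb queue is kept in lock-step with the ring. */
        *pskb = __skb_dequeue(&p->rx_list);

        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, /* assumed length */
                         DMA_FROM_DEVICE);

        return re.d64;
}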
388 static int octeon_mgmt_receive_one(struct octeon_mgmt *p) in octeon_mgmt_receive_one() argument
390 struct net_device *netdev = p->netdev; in octeon_mgmt_receive_one()
400 re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); in octeon_mgmt_receive_one()
406 if (p->has_rx_tstamp) { in octeon_mgmt_receive_one()
429 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); in octeon_mgmt_receive_one()
460 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); in octeon_mgmt_receive_one()
469 cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); in octeon_mgmt_receive_one()
473 static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) in octeon_mgmt_receive_packets() argument
479 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); in octeon_mgmt_receive_packets()
482 rc = octeon_mgmt_receive_one(p); in octeon_mgmt_receive_packets()
487 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); in octeon_mgmt_receive_packets()
490 octeon_mgmt_rx_fill_ring(p->netdev); in octeon_mgmt_receive_packets()
497 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); in octeon_mgmt_napi_poll() local
498 struct net_device *netdev = p->netdev; in octeon_mgmt_napi_poll()
501 work_done = octeon_mgmt_receive_packets(p, budget); in octeon_mgmt_napi_poll()
506 octeon_mgmt_enable_rx_irq(p); in octeon_mgmt_napi_poll()
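A hedged reconstruction of octeon_mgmt_napi_poll() (lines 497-506), the standard NAPI pattern: poll up to budget, and only when the budget was not exhausted complete NAPI and re-enable the RX interrupt:

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        unsigned int work_done;

        work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
                /* No more packets pending: stop polling, unmask the RX IRQ. */
                napi_complete(napi);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);

        return work_done;
}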
514 static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) in octeon_mgmt_reset_hw() argument
521 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_reset_hw()
523 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_reset_hw()
526 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_reset_hw()
527 cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_reset_hw()
530 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); in octeon_mgmt_reset_hw()
532 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", in octeon_mgmt_reset_hw()
537 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", in octeon_mgmt_reset_hw()
560 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_set_rx_filtering() local
602 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_rx_filtering()
605 agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_set_rx_filtering()
608 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); in octeon_mgmt_set_rx_filtering()
615 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); in octeon_mgmt_set_rx_filtering()
617 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); in octeon_mgmt_set_rx_filtering()
618 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); in octeon_mgmt_set_rx_filtering()
619 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); in octeon_mgmt_set_rx_filtering()
620 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); in octeon_mgmt_set_rx_filtering()
621 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); in octeon_mgmt_set_rx_filtering()
622 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); in octeon_mgmt_set_rx_filtering()
623 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); in octeon_mgmt_set_rx_filtering()
627 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); in octeon_mgmt_set_rx_filtering()
629 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_rx_filtering()
646 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_change_mtu() local
653 dev_warn(p->dev, "MTU must be between %d and %d.\n", in octeon_mgmt_change_mtu()
661 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); in octeon_mgmt_change_mtu()
662 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, in octeon_mgmt_change_mtu()
671 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_interrupt() local
674 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); in octeon_mgmt_interrupt()
677 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); in octeon_mgmt_interrupt()
678 cvmx_read_csr(p->mix + MIX_ISR); in octeon_mgmt_interrupt()
681 octeon_mgmt_disable_rx_irq(p); in octeon_mgmt_interrupt()
682 napi_schedule(&p->napi); in octeon_mgmt_interrupt()
685 octeon_mgmt_disable_tx_irq(p); in octeon_mgmt_interrupt()
686 tasklet_schedule(&p->tx_clean_tasklet); in octeon_mgmt_interrupt()
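A hedged reconstruction of octeon_mgmt_interrupt() (lines 671-686); the irthresh/orthresh bit names follow the MIX_ISR layout as I recall it and are assumptions:

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

        /* Write-1-to-clear, then read back to flush the write. */
        cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
        cvmx_read_csr(p->mix + MIX_ISR);

        if (mixx_isr.s.irthresh) {
                /* RX work: mask RX and hand off to NAPI. */
                octeon_mgmt_disable_rx_irq(p);
                napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {
                /* TX completions: mask TX and defer to the tasklet. */
                octeon_mgmt_disable_tx_irq(p);
                tasklet_schedule(&p->tx_clean_tasklet);
        }

        return IRQ_HANDLED;
}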
695 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_ioctl_hwtstamp() local
750 p->has_rx_tstamp = false; in octeon_mgmt_ioctl_hwtstamp()
751 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); in octeon_mgmt_ioctl_hwtstamp()
753 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_ioctl_hwtstamp()
769 p->has_rx_tstamp = have_hw_timestamps; in octeon_mgmt_ioctl_hwtstamp()
771 if (p->has_rx_tstamp) { in octeon_mgmt_ioctl_hwtstamp()
772 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); in octeon_mgmt_ioctl_hwtstamp()
774 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_ioctl_hwtstamp()
790 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_ioctl() local
796 if (p->phydev) in octeon_mgmt_ioctl()
797 return phy_mii_ioctl(p->phydev, rq, cmd); in octeon_mgmt_ioctl()
802 static void octeon_mgmt_disable_link(struct octeon_mgmt *p) in octeon_mgmt_disable_link() argument
807 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_disable_link()
811 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_disable_link()
816 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_disable_link()
825 static void octeon_mgmt_enable_link(struct octeon_mgmt *p) in octeon_mgmt_enable_link() argument
830 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_enable_link()
834 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_enable_link()
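octeon_mgmt_enable_link() (lines 825-834) re-enables the AGL port after a reconfiguration; a hedged sketch with the elided field writes filled in (en/tx_en/rx_en are my assumption from the PRT_CFG layout). Its counterpart octeon_mgmt_disable_link() (lines 802-816) clears the same bits and, per the re-read at line 816, apparently polls PRT_CFG until the MAC goes idle before returning:

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prtx_cfg.s.en = 1;      /* assumed fields */
        prtx_cfg.s.tx_en = 1;
        prtx_cfg.s.rx_en = 1;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}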
837 static void octeon_mgmt_update_link(struct octeon_mgmt *p) in octeon_mgmt_update_link() argument
841 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_update_link()
843 if (!p->phydev->link) in octeon_mgmt_update_link()
846 prtx_cfg.s.duplex = p->phydev->duplex; in octeon_mgmt_update_link()
848 switch (p->phydev->speed) { in octeon_mgmt_update_link()
874 prtx_cfg.s.burst = p->phydev->duplex; in octeon_mgmt_update_link()
883 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_update_link()
886 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_update_link()
892 prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_update_link()
893 agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); in octeon_mgmt_update_link()
897 if (p->phydev->speed == 10) in octeon_mgmt_update_link()
899 else if (p->phydev->speed == 100) in octeon_mgmt_update_link()
902 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); in octeon_mgmt_update_link()
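Lines 886-902 retune the AGL TX clock divisor after a speed change. A hedged sketch of the elided logic; the clk_cnt values (50 for 10 Mb/s, 5 for 100 Mb/s, 1 otherwise) and the RGMII-mode test are my recollection of the driver, not confirmed by the listing:

prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
agl_clk.s.clk_cnt = 1;                  /* MII (both speeds) and RGMII gigabit */
if (prtx_ctl.s.mode == 0) {             /* assumed: 0 == RGMII mode */
        if (p->phydev->speed == 10)
                agl_clk.s.clk_cnt = 50;
        else if (p->phydev->speed == 100)
                agl_clk.s.clk_cnt = 5;
}
cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);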
908 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_adjust_link() local
912 if (!p->phydev) in octeon_mgmt_adjust_link()
915 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_adjust_link()
918 if (!p->phydev->link && p->last_link) in octeon_mgmt_adjust_link()
921 if (p->phydev->link in octeon_mgmt_adjust_link()
922 && (p->last_duplex != p->phydev->duplex in octeon_mgmt_adjust_link()
923 || p->last_link != p->phydev->link in octeon_mgmt_adjust_link()
924 || p->last_speed != p->phydev->speed)) { in octeon_mgmt_adjust_link()
925 octeon_mgmt_disable_link(p); in octeon_mgmt_adjust_link()
927 octeon_mgmt_update_link(p); in octeon_mgmt_adjust_link()
928 octeon_mgmt_enable_link(p); in octeon_mgmt_adjust_link()
931 p->last_link = p->phydev->link; in octeon_mgmt_adjust_link()
932 p->last_speed = p->phydev->speed; in octeon_mgmt_adjust_link()
933 p->last_duplex = p->phydev->duplex; in octeon_mgmt_adjust_link()
935 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_adjust_link()
940 p->phydev->speed, in octeon_mgmt_adjust_link()
941 DUPLEX_FULL == p->phydev->duplex ? in octeon_mgmt_adjust_link()
951 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_init_phy() local
953 if (octeon_is_simulation() || p->phy_np == NULL) { in octeon_mgmt_init_phy()
959 p->phydev = of_phy_connect(netdev, p->phy_np, in octeon_mgmt_init_phy()
963 if (!p->phydev) in octeon_mgmt_init_phy()
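A hedged reconstruction of octeon_mgmt_init_phy() (lines 951-963): under the simulator, or with no phy-handle in the device tree, the link is simply forced up; otherwise the PHY is attached via the OF helper. The carrier-on fallback, the MII interface mode, and the errno are assumptions:

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (octeon_is_simulation() || p->phy_np == NULL) {
                /* No PHYs in the simulator: pretend the link is up. */
                netif_carrier_on(netdev);
                return 0;
        }

        p->phydev = of_phy_connect(netdev, p->phy_np,
                                   octeon_mgmt_adjust_link, 0,
                                   PHY_INTERFACE_MODE_MII);
        if (!p->phydev)
                return -ENODEV; /* assumed errno */

        return 0;
}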
971 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_open() local
983 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), in octeon_mgmt_open()
985 if (!p->tx_ring) in octeon_mgmt_open()
987 p->tx_ring_handle = in octeon_mgmt_open()
988 dma_map_single(p->dev, p->tx_ring, in octeon_mgmt_open()
991 p->tx_next = 0; in octeon_mgmt_open()
992 p->tx_next_clean = 0; in octeon_mgmt_open()
993 p->tx_current_fill = 0; in octeon_mgmt_open()
996 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), in octeon_mgmt_open()
998 if (!p->rx_ring) in octeon_mgmt_open()
1000 p->rx_ring_handle = in octeon_mgmt_open()
1001 dma_map_single(p->dev, p->rx_ring, in octeon_mgmt_open()
1005 p->rx_next = 0; in octeon_mgmt_open()
1006 p->rx_next_fill = 0; in octeon_mgmt_open()
1007 p->rx_current_fill = 0; in octeon_mgmt_open()
1009 octeon_mgmt_reset_hw(p); in octeon_mgmt_open()
1011 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_open()
1016 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_open()
1018 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_open()
1035 if (p->port) { in octeon_mgmt_open()
1048 oring1.s.obase = p->tx_ring_handle >> 3; in octeon_mgmt_open()
1050 cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); in octeon_mgmt_open()
1053 iring1.s.ibase = p->rx_ring_handle >> 3; in octeon_mgmt_open()
1055 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); in octeon_mgmt_open()
1074 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_open()
1078 dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); in octeon_mgmt_open()
1083 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { in octeon_mgmt_open()
1085 int rgmii_mode = (p->phydev->supported & in octeon_mgmt_open()
1088 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1090 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1099 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1105 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1106 cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ in octeon_mgmt_open()
1114 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1116 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1119 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1124 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1126 cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1144 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); in octeon_mgmt_open()
1145 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); in octeon_mgmt_open()
1146 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); in octeon_mgmt_open()
1148 cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); in octeon_mgmt_open()
1149 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); in octeon_mgmt_open()
1150 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); in octeon_mgmt_open()
1153 cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); in octeon_mgmt_open()
1155 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, in octeon_mgmt_open()
1157 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); in octeon_mgmt_open()
1164 cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); in octeon_mgmt_open()
1169 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); in octeon_mgmt_open()
1175 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_open()
1180 rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; in octeon_mgmt_open()
1204 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_open()
1207 octeon_mgmt_disable_link(p); in octeon_mgmt_open()
1208 if (p->phydev) in octeon_mgmt_open()
1209 octeon_mgmt_update_link(p); in octeon_mgmt_open()
1210 octeon_mgmt_enable_link(p); in octeon_mgmt_open()
1212 p->last_link = 0; in octeon_mgmt_open()
1213 p->last_speed = 0; in octeon_mgmt_open()
1217 if (p->phydev) { in octeon_mgmt_open()
1219 phy_start_aneg(p->phydev); in octeon_mgmt_open()
1223 napi_enable(&p->napi); in octeon_mgmt_open()
1227 octeon_mgmt_reset_hw(p); in octeon_mgmt_open()
1228 dma_unmap_single(p->dev, p->rx_ring_handle, in octeon_mgmt_open()
1231 kfree(p->rx_ring); in octeon_mgmt_open()
1233 dma_unmap_single(p->dev, p->tx_ring_handle, in octeon_mgmt_open()
1236 kfree(p->tx_ring); in octeon_mgmt_open()
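The tail of octeon_mgmt_open() (lines 1227-1236) is the error unwind: reset the MIX/AGL block, then unmap and free the RX ring, then the TX ring, in reverse order of setup. A hedged sketch with the (assumed) label names and return value added:

err_noirq:
        octeon_mgmt_reset_hw(p);
        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);
err_nomem:
        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);
        return -ENOMEM;             /* assumed return value */
}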
1242 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_stop() local
1244 napi_disable(&p->napi); in octeon_mgmt_stop()
1247 if (p->phydev) in octeon_mgmt_stop()
1248 phy_disconnect(p->phydev); in octeon_mgmt_stop()
1249 p->phydev = NULL; in octeon_mgmt_stop()
1253 octeon_mgmt_reset_hw(p); in octeon_mgmt_stop()
1255 free_irq(p->irq, netdev); in octeon_mgmt_stop()
1258 skb_queue_purge(&p->tx_list); in octeon_mgmt_stop()
1259 skb_queue_purge(&p->rx_list); in octeon_mgmt_stop()
1261 dma_unmap_single(p->dev, p->rx_ring_handle, in octeon_mgmt_stop()
1264 kfree(p->rx_ring); in octeon_mgmt_stop()
1266 dma_unmap_single(p->dev, p->tx_ring_handle, in octeon_mgmt_stop()
1269 kfree(p->tx_ring); in octeon_mgmt_stop()
1276 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_xmit() local
1284 re.s.addr = dma_map_single(p->dev, skb->data, in octeon_mgmt_xmit()
1288 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1290 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { in octeon_mgmt_xmit()
1291 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1293 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1296 if (unlikely(p->tx_current_fill >= in octeon_mgmt_xmit()
1298 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1299 dma_unmap_single(p->dev, re.s.addr, re.s.len, in octeon_mgmt_xmit()
1304 __skb_queue_tail(&p->tx_list, skb); in octeon_mgmt_xmit()
1307 p->tx_ring[p->tx_next] = re.d64; in octeon_mgmt_xmit()
1308 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; in octeon_mgmt_xmit()
1309 p->tx_current_fill++; in octeon_mgmt_xmit()
1311 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1313 dma_sync_single_for_device(p->dev, p->tx_ring_handle, in octeon_mgmt_xmit()
1321 cvmx_write_csr(p->mix + MIX_ORING2, 1); in octeon_mgmt_xmit()
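Lines 1288-1299 of octeon_mgmt_xmit() show the ring-full backoff: if the ring is within one entry of full, drop the lock, stop the queue, and retake the lock; if a concurrent cleaner did not free space in the meantime, unmap and bounce the packet. A hedged reconstruction of the control flow (the netif_stop_queue() call between unlock and relock, and the NETDEV_TX_BUSY return, are inferred rather than shown in the listing):

spin_lock_irqsave(&p->tx_list.lock, flags);

if (unlikely(p->tx_current_fill >=
             ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
        spin_unlock_irqrestore(&p->tx_list.lock, flags);
        netif_stop_queue(netdev);       /* assumed: stop before re-checking */
        spin_lock_irqsave(&p->tx_list.lock, flags);
}

if (unlikely(p->tx_current_fill >=
             ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
        /* Still full: undo the DMA mapping and push back to the stack. */
        spin_unlock_irqrestore(&p->tx_list.lock, flags);
        dma_unmap_single(p->dev, re.s.addr, re.s.len,
                         DMA_TO_DEVICE);
        return NETDEV_TX_BUSY;          /* assumed return */
}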
1333 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_poll_controller() local
1335 octeon_mgmt_receive_packets(p, 16); in octeon_mgmt_poll_controller()
1356 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_get_settings() local
1358 if (p->phydev) in octeon_mgmt_get_settings()
1359 return phy_ethtool_gset(p->phydev, cmd); in octeon_mgmt_get_settings()
1367 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_set_settings() local
1372 if (p->phydev) in octeon_mgmt_set_settings()
1373 return phy_ethtool_sset(p->phydev, cmd); in octeon_mgmt_set_settings()
1380 struct octeon_mgmt *p = netdev_priv(dev); in octeon_mgmt_nway_reset() local
1385 if (p->phydev) in octeon_mgmt_nway_reset()
1386 return phy_start_aneg(p->phydev); in octeon_mgmt_nway_reset()
1415 struct octeon_mgmt *p; in octeon_mgmt_probe() local
1431 p = netdev_priv(netdev); in octeon_mgmt_probe()
1432 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, in octeon_mgmt_probe()
1435 p->netdev = netdev; in octeon_mgmt_probe()
1436 p->dev = &pdev->dev; in octeon_mgmt_probe()
1437 p->has_rx_tstamp = false; in octeon_mgmt_probe()
1441 p->port = be32_to_cpup(data); in octeon_mgmt_probe()
1448 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); in octeon_mgmt_probe()
1454 p->irq = result; in octeon_mgmt_probe()
1477 p->mix_phys = res_mix->start; in octeon_mgmt_probe()
1478 p->mix_size = resource_size(res_mix); in octeon_mgmt_probe()
1479 p->agl_phys = res_agl->start; in octeon_mgmt_probe()
1480 p->agl_size = resource_size(res_agl); in octeon_mgmt_probe()
1481 p->agl_prt_ctl_phys = res_agl_prt_ctl->start; in octeon_mgmt_probe()
1482 p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); in octeon_mgmt_probe()
1485 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, in octeon_mgmt_probe()
1493 if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, in octeon_mgmt_probe()
1501 if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, in octeon_mgmt_probe()
1502 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { in octeon_mgmt_probe()
1509 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); in octeon_mgmt_probe()
1510 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); in octeon_mgmt_probe()
1511 p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, in octeon_mgmt_probe()
1512 p->agl_prt_ctl_size); in octeon_mgmt_probe()
1513 spin_lock_init(&p->lock); in octeon_mgmt_probe()
1515 skb_queue_head_init(&p->tx_list); in octeon_mgmt_probe()
1516 skb_queue_head_init(&p->rx_list); in octeon_mgmt_probe()
1517 tasklet_init(&p->tx_clean_tasklet, in octeon_mgmt_probe()
1518 octeon_mgmt_clean_tx_tasklet, (unsigned long)p); in octeon_mgmt_probe()
1532 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); in octeon_mgmt_probe()