Lines matching refs: port (each hit shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks how port is bound there)
256 struct port { struct
314 #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ argument
316 #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) argument
318 #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ argument
320 #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) argument
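The rx/tx descriptor macros at 314-320 imply one shared descriptor table: RX descriptors occupy the first RX_DESCS slots and TX descriptors follow, addressable either by bus address (desc_tab_phys) or by virtual pointer (desc_tab). Below is a minimal user-space sketch of that layout and address arithmetic; the struct desc fields, the descriptor counts, and the elided second line of each truncated *_phys macro are assumptions, not the driver's definitions.

```c
/*
 * Sketch of the shared descriptor table implied by the macros at 314-320.
 * The struct desc fields, the descriptor counts and the elided second line
 * of each *_phys macro are illustrative assumptions, not driver definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define RX_DESCS 4                      /* assumed counts */
#define TX_DESCS 4

struct desc {                           /* placeholder descriptor layout */
	uint32_t data;
	uint32_t buf_len;
	uint32_t pkt_len;
	uint32_t status;
};

struct port {
	struct desc *desc_tab;          /* virtual address of the table */
	uint32_t desc_tab_phys;         /* bus address of the same memory */
};

/* RX descriptors fill slots 0..RX_DESCS-1, TX descriptors come after them */
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)  (&(port)->desc_tab[n])
#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
			       ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)  (&(port)->desc_tab[(n) + RX_DESCS])

int main(void)
{
	struct desc table[RX_DESCS + TX_DESCS];
	struct port p = { .desc_tab = table, .desc_tab_phys = 0x10000 };

	/* TX descriptor 1 lives RX_DESCS + 1 slots past the table base */
	printf("tx_desc_phys(1) = 0x%x\n", (unsigned int)tx_desc_phys(&p, 1));
	printf("slot offset     = %d\n", (int)(tx_desc_ptr(&p, 1) - p.desc_tab));
	return 0;
}
```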
342 static inline struct port* dev_to_port(struct net_device *dev) in dev_to_port()
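The accessor at 342 is listed without its body; in a generic-HDLC driver the per-port state is normally reached through dev_to_hdlc(dev)->priv from <linux/hdlc.h>. A plausible one-line body, offered as an inference rather than a quote:

```c
/*
 * Assumed body for dev_to_port() at 342: a generic-HDLC driver usually keeps
 * its per-port state in dev_to_hdlc(dev)->priv (<linux/hdlc.h>).  This is an
 * inference, not a quote of the driver's code.
 */
static inline struct port *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}
```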
360 static void hss_npe_send(struct port *port, struct msg *msg, const char* what) in hss_npe_send() argument
363 if (npe_send_message(port->npe, msg, what)) { in hss_npe_send()
365 port->id, val[0], val[1], npe_name(port->npe)); in hss_npe_send()
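Hits 360-365 outline hss_npe_send() as a thin wrapper: forward the message to the NPE and, if the send fails, log the port, the raw message words, and the NPE name. A hedged reconstruction of that shape; only the call signatures shown in the hits are taken as given, while the two-word view of struct msg and the log format string are assumptions.

```c
/*
 * Hedged reconstruction of hss_npe_send() from hits 360-365.  Only the call
 * signatures shown in the hits are taken as given; the two-word view of
 * struct msg and the log format string are assumptions.
 */
static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
	u32 *val = (u32 *)msg;          /* assumes msg is (at least) two u32s */

	if (npe_send_message(port->npe, msg, what))
		pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
			port->id, val[0], val[1], npe_name(port->npe));
}
```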
370 static void hss_config_set_lut(struct port *port) in hss_config_set_lut() argument
377 msg.hss_port = port->id; in hss_config_set_lut()
385 hss_npe_send(port, &msg, "HSS_SET_TX_LUT"); in hss_config_set_lut()
388 hss_npe_send(port, &msg, "HSS_SET_RX_LUT"); in hss_config_set_lut()
393 static void hss_config(struct port *port) in hss_config() argument
399 msg.hss_port = port->id; in hss_config()
403 if (port->clock_type == CLOCK_INT) in hss_config()
405 hss_npe_send(port, &msg, "HSS_SET_TX_PCR"); in hss_config()
409 hss_npe_send(port, &msg, "HSS_SET_RX_PCR"); in hss_config()
413 msg.hss_port = port->id; in hss_config()
415 msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) | in hss_config()
416 (port->id ? CCR_SECOND_HSS : 0); in hss_config()
417 hss_npe_send(port, &msg, "HSS_SET_CORE_CR"); in hss_config()
421 msg.hss_port = port->id; in hss_config()
423 msg.data32 = port->clock_reg; in hss_config()
424 hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR"); in hss_config()
428 msg.hss_port = port->id; in hss_config()
432 hss_npe_send(port, &msg, "HSS_SET_TX_FCR"); in hss_config()
436 msg.hss_port = port->id; in hss_config()
440 hss_npe_send(port, &msg, "HSS_SET_RX_FCR"); in hss_config()
442 hss_config_set_lut(port); in hss_config()
446 msg.hss_port = port->id; in hss_config()
447 hss_npe_send(port, &msg, "HSS_LOAD_CONFIG"); in hss_config()
449 if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") || in hss_config()
452 pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id); in hss_config()
457 npe_recv_message(port->npe, &msg, "FLUSH_IT"); in hss_config()
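Hits 442-457 end hss_config() with a commit-and-verify step: HSS_LOAD_CONFIG is sent, the NPE's reply is read back with npe_recv_message(), a bad reply is reported with pr_crit(), and a final "FLUSH_IT" receive apparently drains any stray message. A condensed sketch of that handshake; the command constant and the success test on the reply are assumptions about fields the listing does not show.

```c
/* Condensed sketch of the commit-and-verify step from hits 442-457.
 * PORT_CONFIG_LOAD and the "non-zero data32 means failure" test are
 * assumptions about fields the listing does not show. */
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_LOAD;             /* assumed command constant */
msg.hss_port = port->id;
hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
    msg.data32)                         /* assumed error indication */
	pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);

/* drain any extra message the NPE may have queued (hit 457) */
npe_recv_message(port->npe, &msg, "FLUSH_IT");
```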
460 static void hss_set_hdlc_cfg(struct port *port) in hss_set_hdlc_cfg() argument
466 msg.hss_port = port->id; in hss_set_hdlc_cfg()
467 msg.data8a = port->hdlc_cfg; /* rx_cfg */ in hss_set_hdlc_cfg()
468 msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */ in hss_set_hdlc_cfg()
469 hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG"); in hss_set_hdlc_cfg()
472 static u32 hss_get_status(struct port *port) in hss_get_status() argument
478 msg.hss_port = port->id; in hss_get_status()
479 hss_npe_send(port, &msg, "PORT_ERROR_READ"); in hss_get_status()
480 if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) { in hss_get_status()
481 pr_crit("HSS-%i: unable to read HSS status\n", port->id); in hss_get_status()
488 static void hss_start_hdlc(struct port *port) in hss_start_hdlc() argument
494 msg.hss_port = port->id; in hss_start_hdlc()
496 hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE"); in hss_start_hdlc()
499 static void hss_stop_hdlc(struct port *port) in hss_stop_hdlc() argument
505 msg.hss_port = port->id; in hss_stop_hdlc()
506 hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE"); in hss_stop_hdlc()
507 hss_get_status(port); /* make sure it's halted */ in hss_stop_hdlc()
510 static int hss_load_firmware(struct port *port) in hss_load_firmware() argument
515 if (port->initialized) in hss_load_firmware()
518 if (!npe_running(port->npe) && in hss_load_firmware()
519 (err = npe_load_firmware(port->npe, npe_name(port->npe), in hss_load_firmware()
520 port->dev))) in hss_load_firmware()
526 msg.hss_port = port->id; in hss_load_firmware()
528 hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES"); in hss_load_firmware()
532 hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO"); in hss_load_firmware()
538 hss_npe_send(port, &msg, "HSS_SET_PKT_MODE"); in hss_load_firmware()
542 hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE"); in hss_load_firmware()
546 hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE"); in hss_load_firmware()
548 port->initialized = 1; in hss_load_firmware()
582 static inline int queue_get_desc(unsigned int queue, struct port *port, in queue_get_desc() argument
592 tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0); in queue_get_desc()
593 tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0); in queue_get_desc()
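queue_get_desc() (hits 582-593) selects the RX or TX half of the table via the n == 0 macros; the natural way to finish the job is to pop a descriptor's bus address from the queue manager and convert it to a slot index by subtracting the table base and dividing by the descriptor size. A small user-space illustration of that arithmetic, with illustrative names and sizes:

```c
/*
 * User-space illustration of the bus-address -> slot-index step that
 * queue_get_desc() (hits 582-593) appears to perform after choosing the RX
 * or TX table base.  All names and sizes here are illustrative.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RX_DESCS 4                              /* assumed count */
struct desc { uint32_t data, buf_len, pkt_len, status; };

static int phys_to_index(uint32_t phys, uint32_t tab_phys)
{
	uint32_t off = phys - tab_phys;

	assert(off % sizeof(struct desc) == 0);
	return off / sizeof(struct desc);
}

int main(void)
{
	uint32_t desc_tab_phys = 0x10000;
	int is_tx = 1;
	/* same choice as the hits: rx_desc_phys(port, 0) or tx_desc_phys(port, 0) */
	uint32_t tab_phys = is_tx
		? desc_tab_phys + RX_DESCS * sizeof(struct desc)
		: desc_tab_phys;
	/* pretend the queue manager handed back the address of TX slot 2 */
	uint32_t popped = tab_phys + 2 * sizeof(struct desc);

	printf("descriptor index = %d\n", phys_to_index(popped, tab_phys));
	return 0;
}
```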
612 static inline void dma_unmap_tx(struct port *port, struct desc *desc) in dma_unmap_tx() argument
615 dma_unmap_single(&port->netdev->dev, desc->data, in dma_unmap_tx()
618 dma_unmap_single(&port->netdev->dev, desc->data & ~3, in dma_unmap_tx()
628 struct port *port = dev_to_port(netdev); in hss_hdlc_set_carrier() local
632 port->carrier = carrier; in hss_hdlc_set_carrier()
633 if (!port->loopback) { in hss_hdlc_set_carrier()
645 struct port *port = dev_to_port(dev); in hss_hdlc_rx_irq() local
650 qmgr_disable_irq(queue_ids[port->id].rx); in hss_hdlc_rx_irq()
651 napi_schedule(&port->napi); in hss_hdlc_rx_irq()
656 struct port *port = container_of(napi, struct port, napi); in hss_hdlc_poll() local
657 struct net_device *dev = port->netdev; in hss_hdlc_poll()
658 unsigned int rxq = queue_ids[port->id].rx; in hss_hdlc_poll()
659 unsigned int rxfreeq = queue_ids[port->id].rxfree; in hss_hdlc_poll()
675 if ((n = queue_get_desc(rxq, port, 0)) < 0) { in hss_hdlc_poll()
699 desc = rx_desc_ptr(port, n); in hss_hdlc_poll()
748 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); in hss_hdlc_poll()
755 skb = port->rx_buff_tab[n]; in hss_hdlc_poll()
761 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], in hss_hdlc_poll()
775 port->rx_buff_tab[n] = temp; in hss_hdlc_poll()
780 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); in hss_hdlc_poll()
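Hits 645-651 and 656-780 follow the usual queue-manager + NAPI receive shape: the RX interrupt handler only masks the queue's interrupt and schedules NAPI, and the poll routine then drains completed descriptors, passes each filled buffer up, and recycles the descriptor onto the RX-free queue. A heavily condensed, hedged skeleton of that flow; refill_rx_buffer() is a stand-in, and the byte-swap and error handling visible around 755-775 are omitted.

```c
/*
 * Condensed skeleton of the RX path suggested by hits 645-780.  It is not the
 * driver's code: refill_rx_buffer() is a stand-in for the unmap/replace/remap
 * work, and the byte-swap and error handling around 755-775 are omitted.
 */
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

	qmgr_disable_irq(queue_ids[port->id].rx);   /* mask; NAPI will poll */
	napi_schedule(&port->napi);
}

static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

	while (received < budget) {
		int n = queue_get_desc(rxq, port, 0);

		if (n < 0) {                        /* RX queue drained */
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			/* the real poll re-checks the queue here to close the
			 * race between "empty" and "irq re-enabled" */
			break;
		}
		refill_rx_buffer(port, n);          /* hand skb up, attach new one */
		queue_put_desc(rxfreeq, rx_desc_phys(port, n),
			       rx_desc_ptr(port, n));
		received++;
	}
	return received;
}
```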
793 struct port *port = dev_to_port(dev); in hss_hdlc_txdone_irq() local
799 while ((n_desc = queue_get_desc(queue_ids[port->id].txdone, in hss_hdlc_txdone_irq()
800 port, 1)) >= 0) { in hss_hdlc_txdone_irq()
804 desc = tx_desc_ptr(port, n_desc); in hss_hdlc_txdone_irq()
809 dma_unmap_tx(port, desc); in hss_hdlc_txdone_irq()
812 dev->name, port->tx_buff_tab[n_desc]); in hss_hdlc_txdone_irq()
814 free_buffer_irq(port->tx_buff_tab[n_desc]); in hss_hdlc_txdone_irq()
815 port->tx_buff_tab[n_desc] = NULL; in hss_hdlc_txdone_irq()
817 start = qmgr_stat_below_low_watermark(port->plat->txreadyq); in hss_hdlc_txdone_irq()
818 queue_put_desc(port->plat->txreadyq, in hss_hdlc_txdone_irq()
819 tx_desc_phys(port, n_desc), desc); in hss_hdlc_txdone_irq()
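Hits 793-819 show the TX-completion side: descriptors popped from the txdone queue are unmapped, their buffers freed, and the descriptors parked back on the platform's TX-ready queue; the watermark probe at 817 decides whether a stalled transmit queue can be restarted. A hedged sketch of that loop, leaving out statistics and the aligned-copy buffer case:

```c
/*
 * Hedged sketch of the TX-completion loop from hits 793-819.  Statistics and
 * the unaligned-copy buffer case are omitted; netif_wake_queue() is the usual
 * way to restart a queue stopped for lack of TX descriptors.
 */
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc = tx_desc_ptr(port, n_desc);
		int start;

		dma_unmap_tx(port, desc);
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		/* was TX-ready (nearly) empty before this descriptor goes back? */
		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start)
			netif_wake_queue(dev);
	}
}
```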
832 struct port *port = dev_to_port(dev); in hss_hdlc_xmit() local
833 unsigned int txreadyq = port->plat->txreadyq; in hss_hdlc_xmit()
879 n = queue_get_desc(txreadyq, port, 1); in hss_hdlc_xmit()
881 desc = tx_desc_ptr(port, n); in hss_hdlc_xmit()
884 port->tx_buff_tab[n] = skb; in hss_hdlc_xmit()
886 port->tx_buff_tab[n] = mem; in hss_hdlc_xmit()
892 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); in hss_hdlc_xmit()
916 static int request_hdlc_queues(struct port *port) in request_hdlc_queues() argument
920 err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0, in request_hdlc_queues()
921 "%s:RX-free", port->netdev->name); in request_hdlc_queues()
925 err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0, in request_hdlc_queues()
926 "%s:RX", port->netdev->name); in request_hdlc_queues()
930 err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0, in request_hdlc_queues()
931 "%s:TX", port->netdev->name); in request_hdlc_queues()
935 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, in request_hdlc_queues()
936 "%s:TX-ready", port->netdev->name); in request_hdlc_queues()
940 err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0, in request_hdlc_queues()
941 "%s:TX-done", port->netdev->name); in request_hdlc_queues()
947 qmgr_release_queue(port->plat->txreadyq); in request_hdlc_queues()
949 qmgr_release_queue(queue_ids[port->id].tx); in request_hdlc_queues()
951 qmgr_release_queue(queue_ids[port->id].rx); in request_hdlc_queues()
953 qmgr_release_queue(queue_ids[port->id].rxfree); in request_hdlc_queues()
955 port->netdev->name); in request_hdlc_queues()
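request_hdlc_queues() (hits 916-955) is a textbook acquire-with-rollback ladder: each qmgr_request_queue() failure jumps to a label that releases everything obtained so far, newest first. A self-contained user-space illustration of the same goto-unwind pattern, with made-up resource names and a simulated failure on the last step:

```c
/*
 * Self-contained illustration of the goto-unwind pattern used by
 * request_hdlc_queues() (hits 916-955): acquire in order, and on failure
 * release whatever was already obtained, newest first.  Resource names and
 * the simulated failure are made up.
 */
#include <stdio.h>
#include <string.h>

static int acquire(const char *name)
{
	if (!strcmp(name, "txdone"))    /* simulate the last step failing */
		return -1;
	printf("acquired %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

static int request_all(void)
{
	int err;

	if ((err = acquire("rxfree")))
		return err;
	if ((err = acquire("rx")))
		goto rel_rxfree;
	if ((err = acquire("tx")))
		goto rel_rx;
	if ((err = acquire("txready")))
		goto rel_tx;
	if ((err = acquire("txdone")))
		goto rel_txready;
	return 0;

rel_txready:
	release("txready");
rel_tx:
	release("tx");
rel_rx:
	release("rx");
rel_rxfree:
	release("rxfree");
	return err;
}

int main(void)
{
	return request_all() ? 1 : 0;
}
```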
959 static void release_hdlc_queues(struct port *port) in release_hdlc_queues() argument
961 qmgr_release_queue(queue_ids[port->id].rxfree); in release_hdlc_queues()
962 qmgr_release_queue(queue_ids[port->id].rx); in release_hdlc_queues()
963 qmgr_release_queue(queue_ids[port->id].txdone); in release_hdlc_queues()
964 qmgr_release_queue(queue_ids[port->id].tx); in release_hdlc_queues()
965 qmgr_release_queue(port->plat->txreadyq); in release_hdlc_queues()
968 static int init_hdlc_queues(struct port *port) in init_hdlc_queues() argument
973 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, in init_hdlc_queues()
979 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, in init_hdlc_queues()
980 &port->desc_tab_phys))) in init_hdlc_queues()
982 memset(port->desc_tab, 0, POOL_ALLOC_SIZE); in init_hdlc_queues()
983 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ in init_hdlc_queues()
984 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); in init_hdlc_queues()
988 struct desc *desc = rx_desc_ptr(port, i); in init_hdlc_queues()
992 if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE))) in init_hdlc_queues()
1001 desc->data = dma_map_single(&port->netdev->dev, data, in init_hdlc_queues()
1003 if (dma_mapping_error(&port->netdev->dev, desc->data)) { in init_hdlc_queues()
1007 port->rx_buff_tab[i] = buff; in init_hdlc_queues()
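init_hdlc_queues() (hits 968-1007) allocates the whole descriptor table from one DMA pool, then gives every RX descriptor an skb whose data is streaming-mapped with dma_map_single() and checked with dma_mapping_error(). A simplified, hedged fragment of that per-descriptor loop; the buf_len field and the bare error returns are assumptions:

```c
/* Simplified fragment of the per-descriptor RX setup loop from hits 986-1007.
 * buf_len and the bare error returns are assumptions; the real function also
 * handles the non-skb buffer variant and unwinds earlier allocations. */
for (i = 0; i < RX_DESCS; i++) {
	struct desc *desc = rx_desc_ptr(port, i);
	struct sk_buff *buff;

	buff = netdev_alloc_skb(port->netdev, RX_SIZE);
	if (!buff)
		return -ENOMEM;

	desc->buf_len = RX_SIZE;
	desc->data = dma_map_single(&port->netdev->dev, buff->data,
				    RX_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&port->netdev->dev, desc->data)) {
		dev_kfree_skb(buff);
		return -EIO;
	}
	port->rx_buff_tab[i] = buff;
}
```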
1013 static void destroy_hdlc_queues(struct port *port) in destroy_hdlc_queues() argument
1017 if (port->desc_tab) { in destroy_hdlc_queues()
1019 struct desc *desc = rx_desc_ptr(port, i); in destroy_hdlc_queues()
1020 buffer_t *buff = port->rx_buff_tab[i]; in destroy_hdlc_queues()
1022 dma_unmap_single(&port->netdev->dev, in destroy_hdlc_queues()
1029 struct desc *desc = tx_desc_ptr(port, i); in destroy_hdlc_queues()
1030 buffer_t *buff = port->tx_buff_tab[i]; in destroy_hdlc_queues()
1032 dma_unmap_tx(port, desc); in destroy_hdlc_queues()
1036 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); in destroy_hdlc_queues()
1037 port->desc_tab = NULL; in destroy_hdlc_queues()
1048 struct port *port = dev_to_port(dev); in hss_hdlc_open() local
1055 if ((err = hss_load_firmware(port))) in hss_hdlc_open()
1058 if ((err = request_hdlc_queues(port))) in hss_hdlc_open()
1061 if ((err = init_hdlc_queues(port))) in hss_hdlc_open()
1065 if (port->plat->open) in hss_hdlc_open()
1066 if ((err = port->plat->open(port->id, dev, in hss_hdlc_open()
1073 queue_put_desc(port->plat->txreadyq, in hss_hdlc_open()
1074 tx_desc_phys(port, i), tx_desc_ptr(port, i)); in hss_hdlc_open()
1077 queue_put_desc(queue_ids[port->id].rxfree, in hss_hdlc_open()
1078 rx_desc_phys(port, i), rx_desc_ptr(port, i)); in hss_hdlc_open()
1080 napi_enable(&port->napi); in hss_hdlc_open()
1083 qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY, in hss_hdlc_open()
1086 qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY, in hss_hdlc_open()
1088 qmgr_enable_irq(queue_ids[port->id].txdone); in hss_hdlc_open()
1092 hss_set_hdlc_cfg(port); in hss_hdlc_open()
1093 hss_config(port); in hss_hdlc_open()
1095 hss_start_hdlc(port); in hss_hdlc_open()
1098 napi_schedule(&port->napi); in hss_hdlc_open()
1104 destroy_hdlc_queues(port); in hss_hdlc_open()
1105 release_hdlc_queues(port); in hss_hdlc_open()
1113 struct port *port = dev_to_port(dev); in hss_hdlc_close() local
1119 qmgr_disable_irq(queue_ids[port->id].rx); in hss_hdlc_close()
1121 napi_disable(&port->napi); in hss_hdlc_close()
1123 hss_stop_hdlc(port); in hss_hdlc_close()
1125 while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0) in hss_hdlc_close()
1127 while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0) in hss_hdlc_close()
1135 while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0) in hss_hdlc_close()
1140 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) in hss_hdlc_close()
1153 qmgr_disable_irq(queue_ids[port->id].txdone); in hss_hdlc_close()
1155 if (port->plat->close) in hss_hdlc_close()
1156 port->plat->close(port->id, dev); in hss_hdlc_close()
1159 destroy_hdlc_queues(port); in hss_hdlc_close()
1160 release_hdlc_queues(port); in hss_hdlc_close()
1169 struct port *port = dev_to_port(dev); in hss_hdlc_attach() local
1176 port->hdlc_cfg = 0; in hss_hdlc_attach()
1180 port->hdlc_cfg = PKT_HDLC_CRC_32; in hss_hdlc_attach()
1250 struct port *port = dev_to_port(dev); in hss_hdlc_ioctl() local
1265 new_line.clock_type = port->clock_type; in hss_hdlc_ioctl()
1266 new_line.clock_rate = port->clock_rate; in hss_hdlc_ioctl()
1267 new_line.loopback = port->loopback; in hss_hdlc_ioctl()
1280 if (port->plat->set_clock) in hss_hdlc_ioctl()
1281 clk = port->plat->set_clock(port->id, clk); in hss_hdlc_ioctl()
1289 port->clock_type = clk; /* Update settings */ in hss_hdlc_ioctl()
1291 find_best_clock(new_line.clock_rate, &port->clock_rate, in hss_hdlc_ioctl()
1292 &port->clock_reg); in hss_hdlc_ioctl()
1294 port->clock_rate = 0; in hss_hdlc_ioctl()
1295 port->clock_reg = CLK42X_SPEED_2048KHZ; in hss_hdlc_ioctl()
1297 port->loopback = new_line.loopback; in hss_hdlc_ioctl()
1302 hss_config(port); in hss_hdlc_ioctl()
1304 if (port->loopback || port->carrier) in hss_hdlc_ioctl()
1305 netif_carrier_on(port->netdev); in hss_hdlc_ioctl()
1307 netif_carrier_off(port->netdev); in hss_hdlc_ioctl()
1331 struct port *port; in hss_init_one() local
1336 if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL) in hss_init_one()
1339 if ((port->npe = npe_request(0)) == NULL) { in hss_init_one()
1344 if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) { in hss_init_one()
1355 port->clock_type = CLOCK_EXT; in hss_init_one()
1356 port->clock_rate = 0; in hss_init_one()
1357 port->clock_reg = CLK42X_SPEED_2048KHZ; in hss_init_one()
1358 port->id = pdev->id; in hss_init_one()
1359 port->dev = &pdev->dev; in hss_init_one()
1360 port->plat = pdev->dev.platform_data; in hss_init_one()
1361 netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); in hss_init_one()
1366 platform_set_drvdata(pdev, port); in hss_init_one()
1374 npe_release(port->npe); in hss_init_one()
1376 kfree(port); in hss_init_one()
1382 struct port *port = platform_get_drvdata(pdev); in hss_remove_one() local
1384 unregister_hdlc_device(port->netdev); in hss_remove_one()
1385 free_netdev(port->netdev); in hss_remove_one()
1386 npe_release(port->npe); in hss_remove_one()
1387 kfree(port); in hss_remove_one()