/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/ |
H A D | octeon_droq.c |
    184 buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size); octeon_droq_setup_ring_buffers()
    212 int octeon_delete_droq(struct octeon_device *oct, u32 q_no) octeon_delete_droq() argument
    214 struct octeon_droq *droq = oct->droq[q_no]; octeon_delete_droq()
    216 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); octeon_delete_droq()
    237 u32 q_no, octeon_init_droq()
    246 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); octeon_init_droq()
    248 droq = oct->droq[q_no]; octeon_init_droq()
    252 droq->q_no = q_no; octeon_init_droq()
    256 droq->app_ctx = (void *)(size_t)q_no; octeon_init_droq()
    276 "Output queue %d ring alloc failed\n", q_no); octeon_init_droq()
    281 q_no, droq->desc_ring, droq->desc_ring_dma); octeon_init_droq()
    282 dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, octeon_init_droq()
    321 oct->fn_list.setup_oq_regs(oct, q_no); octeon_init_droq()
    323 oct->io_qmask.oq |= (1 << q_no); octeon_init_droq()
    328 octeon_delete_droq(oct, q_no); octeon_init_droq()
    461 buf = recv_buffer_alloc(octeon_dev, droq->q_no, octeon_droq_refill()
    596 droq->q_no, droq->read_idx, pkt_count); octeon_droq_fast_process_packets()
    631 droq->q_no, octeon_droq_fast_process_packets()
    806 octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, octeon_process_droq_poll_cmd() argument
    817 if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { octeon_process_droq_poll_cmd()
    819 __func__, q_no, (oct->num_oqs - 1)); octeon_process_droq_poll_cmd()
    823 droq = oct->droq[q_no]; octeon_process_droq_poll_cmd()
    849 value |= (1 << q_no); octeon_process_droq_poll_cmd()
    856 value |= (1 << q_no); octeon_process_droq_poll_cmd()
    877 int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, octeon_register_droq_ops() argument
    895 if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { octeon_register_droq_ops()
    897 __func__, q_no, (oct->num_oqs - 1)); octeon_register_droq_ops()
    901 droq = oct->droq[q_no]; octeon_register_droq_ops()
    912 int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) octeon_unregister_droq_ops() argument
    923 if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { octeon_unregister_droq_ops()
    925 __func__, q_no, oct->num_oqs - 1); octeon_unregister_droq_ops()
    929 droq = oct->droq[q_no]; octeon_unregister_droq_ops()
    933 "Droq id (%d) not available.\n", q_no); octeon_unregister_droq_ops()
    948 u32 q_no, u32 num_descs, octeon_create_droq()
    953 if (oct->droq[q_no]) { octeon_create_droq()
    955 q_no); octeon_create_droq()
    966 octeon_set_droq_pkt_op(oct, q_no, 0); octeon_create_droq()
    967 oct->droq[q_no] = droq; octeon_create_droq()
    970 octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx); octeon_create_droq()
    985 octeon_delete_droq(oct, q_no); octeon_create_droq()
    236 octeon_init_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx) octeon_init_droq() argument
    947 octeon_create_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx) octeon_create_droq() argument
|
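The octeon_droq.c hits above trace the DROQ (output queue) lifecycle: octeon_create_droq() refuses to build a queue that already exists, records the new queue in oct->droq[q_no], sets the matching bit in io_qmask.oq once octeon_init_droq() has allocated the descriptor ring, and falls back to octeon_delete_droq() if initialization fails. Below is a minimal user-space sketch of that create/init/delete pattern; every type and name in it (struct device, struct droq, oq_mask, the 64-byte descriptor size) is a hypothetical stand-in, not the driver's own definition.

/* Minimal sketch of the create/init/delete pattern suggested by the hits
 * above. Every type and name here is a hypothetical stand-in, not the
 * liquidio driver's own definition. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAX_OQS 32

struct droq {
    uint32_t q_no;
    uint32_t max_count;
    void *desc_ring;
};

struct device {
    struct droq *droq[MAX_OQS];
    uint64_t oq_mask;                   /* bit n set => output queue n is live */
};

static int init_droq(struct device *dev, uint32_t q_no, uint32_t num_descs)
{
    struct droq *droq = dev->droq[q_no];

    droq->q_no = q_no;
    droq->max_count = num_descs;
    droq->desc_ring = calloc(num_descs, 64);   /* stand-in for the DMA ring */
    if (!droq->desc_ring)
        return -1;                             /* caller tears the queue down */
    dev->oq_mask |= (1ULL << q_no);            /* mark the queue as usable */
    return 0;
}

static void delete_droq(struct device *dev, uint32_t q_no)
{
    struct droq *droq = dev->droq[q_no];

    if (!droq)
        return;
    free(droq->desc_ring);
    free(droq);
    dev->droq[q_no] = NULL;
    dev->oq_mask &= ~(1ULL << q_no);
}

static int create_droq(struct device *dev, uint32_t q_no, uint32_t num_descs)
{
    if (dev->droq[q_no]) {                     /* queue already exists */
        fprintf(stderr, "Droq %u already in use\n", (unsigned)q_no);
        return 1;
    }
    dev->droq[q_no] = calloc(1, sizeof(struct droq));
    if (!dev->droq[q_no])
        return -1;
    if (init_droq(dev, q_no, num_descs)) {     /* undo partial setup on failure */
        delete_droq(dev, q_no);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct device dev = { 0 };

    if (!create_droq(&dev, 0, 128))
        printf("oq_mask after create: 0x%llx\n", (unsigned long long)dev.oq_mask);
    delete_droq(&dev, 0);
    printf("oq_mask after delete: 0x%llx\n", (unsigned long long)dev.oq_mask);
    return 0;
}

Routing every failure path through the same delete helper keeps the bookkeeping (the droq pointer and the queue-mask bit) consistent no matter where initialization stops.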
H A D | octeon_droq.h |
    240 u32 q_no; member in struct:octeon_droq
    335 * @param q_no - droq no. ranges from 0 - 3.
    340 u32 q_no,
    349 * @param q_no - droq no. ranges from 0 - 3.
    352 int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
    356 * on output queues given by q_no irrespective of the type of packet.
    361 * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1
    367 u32 q_no,
    373 * given by q_no.
    375 * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1
    378 int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
    416 int octeon_create_droq(struct octeon_device *oct, u32 q_no,
    423 int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
|
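The header hits document the per-queue dispatch hooks: octeon_register_droq_ops() attaches a callback that is invoked for every packet arriving on output queue q_no, and octeon_unregister_droq_ops() detaches it, with both validating q_no against the configured maximum. A rough sketch of such a per-queue callback table follows; the struct droq_ops fields and the MAX_OQS value of 4 are assumptions taken from the "0 - 3" range in the comments, not the driver's actual layout.

/* Sketch of a per-queue packet-dispatch table, as implied by the
 * register/unregister prototypes above. The struct and function names are
 * hypothetical, not the driver's octeon_droq_ops. */
#include <stdio.h>
#include <stdint.h>

#define MAX_OQS 4    /* the header comments say q_no ranges from 0 to 3 */

struct droq_ops {
    void (*fptr)(uint32_t q_no, void *pkt, uint32_t len, void *arg);
    void *farg;
};

static struct droq_ops oq_ops[MAX_OQS];

static int register_droq_ops(uint32_t q_no, const struct droq_ops *ops)
{
    if (q_no >= MAX_OQS || !ops || !ops->fptr)
        return -1;                      /* invalid queue number or callback */
    oq_ops[q_no] = *ops;
    return 0;
}

static int unregister_droq_ops(uint32_t q_no)
{
    if (q_no >= MAX_OQS)
        return -1;
    oq_ops[q_no].fptr = NULL;           /* packets on this queue are now dropped */
    oq_ops[q_no].farg = NULL;
    return 0;
}

/* Receive path: called once for every packet arriving on queue q_no. */
static void dispatch_packet(uint32_t q_no, void *pkt, uint32_t len)
{
    if (q_no < MAX_OQS && oq_ops[q_no].fptr)
        oq_ops[q_no].fptr(q_no, pkt, len, oq_ops[q_no].farg);
}

static void count_pkt(uint32_t q_no, void *pkt, uint32_t len, void *arg)
{
    (void)pkt;
    (*(unsigned int *)arg)++;
    printf("packet of %u bytes on queue %u\n", (unsigned)len, (unsigned)q_no);
}

int main(void)
{
    unsigned int seen = 0;
    struct droq_ops ops = { .fptr = count_pkt, .farg = &seen };
    char payload[64] = { 0 };

    register_droq_ops(2, &ops);
    dispatch_packet(2, payload, sizeof(payload));
    unregister_droq_ops(2);
    dispatch_packet(2, payload, sizeof(payload));   /* silently ignored now */
    printf("callbacks seen: %u\n", seen);
    return 0;
}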
H A D | octeon_nic.h |
    88 u32 q_no; member in struct:octnic_data_pkt
    120 static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) octnet_iq_is_full() argument
    122 return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) octnet_iq_is_full()
    123 >= (oct->instr_queue[q_no]->max_count - 2)); octnet_iq_is_full()
    129 * @param q_no - which queue for back pressure
|
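octnet_iq_is_full() above is the transmit backpressure check: an instruction queue counts as full once instr_pending reaches max_count minus a two-descriptor slack. The sketch below mirrors that comparison in user space, with C11 atomics standing in for the kernel's atomic_t and a hypothetical struct instr_queue.

/* Sketch of the "instruction queue is full" backpressure check, modeled on
 * octnet_iq_is_full() above. The struct is hypothetical and C11 atomics
 * stand in for the kernel's atomic_t. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct instr_queue {
    uint32_t max_count;          /* ring size in descriptors */
    atomic_uint instr_pending;   /* commands posted but not yet completed */
};

static bool iq_is_full(struct instr_queue *iq)
{
    /* Keep two descriptors of slack so the producer never fills the ring
     * completely; this mirrors the "max_count - 2" in the driver. */
    return atomic_load(&iq->instr_pending) >= iq->max_count - 2;
}

int main(void)
{
    struct instr_queue iq;

    iq.max_count = 128;
    atomic_init(&iq.instr_pending, 126);
    printf("pending=126 full=%d\n", iq_is_full(&iq));   /* 126 >= 126: full */
    atomic_store(&iq.instr_pending, 125);
    printf("pending=125 full=%d\n", iq_is_full(&iq));   /* not full */
    return 0;
}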
H A D | lio_main.c |
    216 int q_no; octeon_droq_bh() local
    222 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ octeon_droq_bh()
    223 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { octeon_droq_bh()
    224 if (!(oct->io_qmask.oq & (1UL << q_no))) octeon_droq_bh()
    226 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], octeon_droq_bh()
    1675 * @param q_no which queue
    1680 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, octeon_setup_droq() argument
    1685 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); octeon_setup_droq()
    1687 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); octeon_setup_droq()
    1692 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); octeon_setup_droq()
    1698 octeon_set_droq_pkt_op(oct, q_no, 1); octeon_setup_droq()
    1703 writel(oct->droq[q_no]->max_count, octeon_setup_droq()
    1704 oct->droq[q_no]->pkts_credit_reg); octeon_setup_droq()
    1830 droq->q_no, droq->stats.rx_dropped); liquidio_push_packet()
    1884 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, liquidio_napi_do_rx()
    1889 "Receive work_done < 0, rxq:%d\n", droq->q_no); liquidio_napi_do_rx()
    1901 octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, liquidio_napi_do_rx()
    1922 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, liquidio_napi_poll()
    1948 int q, q_no, retval = 0; setup_io_queues() local
    1967 q_no = lio->linfo.rxpciq[q]; setup_io_queues()
    1969 retval = octeon_setup_droq(octeon_dev, q_no, setup_io_queues()
    1983 droq = octeon_dev->droq[q_no]; setup_io_queues()
    1993 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); setup_io_queues()
    2579 sc->iq_no = ndata->q_no; send_nic_timestamp_pkt()
    2718 ndata.q_no = iq_no; liquidio_xmit()
    2721 if (octnet_iq_is_full(oct, ndata.q_no)) { liquidio_xmit()
    2724 ndata.q_no); liquidio_xmit()
    2733 ndata.q_no); liquidio_xmit()
    2737 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", liquidio_xmit()
    2738 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); liquidio_xmit()
    3130 int retval, num_iqueues, num_oqueues, q_no; setup_nic_devices() local
    3262 q_no = __ffs64(q_mask); setup_nic_devices()
    3263 q_mask &= (~(1UL << q_no)); setup_nic_devices()
    3264 lio->linfo.rxpciq[j] = q_no; setup_nic_devices()
    3268 q_no = __ffs64(q_mask); setup_nic_devices()
    3269 q_mask &= (~(1UL << q_no)); setup_nic_devices()
    3270 lio->linfo.txpciq[j] = q_no; setup_nic_devices()
|
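Two patterns recur in the lio_main.c hits: octeon_droq_bh() walks every possible output queue and skips those whose bit is clear in io_qmask.oq, and setup_nic_devices() peels queue numbers off a 64-bit mask with __ffs64(), clearing each bit as it assigns it to rxpciq[] or txpciq[]. The sketch below reproduces that mask-extraction loop using the compiler builtin __builtin_ctzll() in place of the kernel's __ffs64(); the example mask value is made up.

/* Sketch of pulling queue numbers out of a 64-bit mask one bit at a time,
 * in the spirit of the __ffs64()/q_mask loop above. Uses the GCC/Clang
 * builtin __builtin_ctzll() in place of the kernel's __ffs64(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t q_mask = 0x0000000000000163ULL;   /* example: queues 0, 1, 5, 6, 8 */
    int rxpciq[64];
    int num_queues = 0;

    while (q_mask) {
        int q_no = __builtin_ctzll(q_mask);    /* lowest set bit = next queue */
        q_mask &= ~(1ULL << q_no);             /* consume it */
        rxpciq[num_queues++] = q_no;
    }

    for (int i = 0; i < num_queues; i++)
        printf("queue %d -> pci queue %d\n", i, rxpciq[i]);
    return 0;
}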
H A D | octeon_network.h |
    136 u32 q_no __attribute__((unused)), u32 size) recv_buffer_alloc()
    210 u32 q_no, u32 size) octeon_fast_packet_alloc()
    212 return recv_buffer_alloc(oct, q_no, size); octeon_fast_packet_alloc()
    208 octeon_fast_packet_alloc(struct octeon_device *oct, struct octeon_droq *droq, u32 q_no, u32 size) octeon_fast_packet_alloc() argument
|
H A D | octeon_device.c |
    822 u32 q_no, octeon_set_droq_pkt_op()
    831 reg_val = reg_val | (1 << q_no); octeon_set_droq_pkt_op()
    833 reg_val = reg_val & (~(1 << q_no)); octeon_set_droq_pkt_op()
    1154 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) octeon_get_tx_qsize() argument
    1157 if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) && octeon_get_tx_qsize()
    1158 (oct->io_qmask.iq & (1UL << q_no))) octeon_get_tx_qsize()
    1159 return oct->instr_queue[q_no]->max_count; octeon_get_tx_qsize()
    1164 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) octeon_get_rx_qsize() argument
    1166 if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) && octeon_get_rx_qsize()
    1167 (oct->io_qmask.oq & (1UL << q_no))) octeon_get_rx_qsize()
    1168 return oct->droq[q_no]->max_count; octeon_get_rx_qsize()
    821 octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable) octeon_set_droq_pkt_op() argument
|
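octeon_set_droq_pkt_op() in the hits above is a read-modify-write on a per-queue enable register: read the current value, set or clear bit q_no according to the enable flag, and write the result back (octeon_get_tx_qsize()/octeon_get_rx_qsize() use the same bit-per-queue idea when checking io_qmask before dereferencing a queue). A sketch of the bit manipulation follows, with an ordinary variable standing in for the device CSR.

/* Sketch of the set/clear-one-queue-bit pattern from
 * octeon_set_droq_pkt_op(). reg_val is an ordinary variable here; in the
 * driver it is read from and written back to a device register. */
#include <stdint.h>
#include <stdio.h>

static uint32_t oq_pkt_enable_reg;   /* stand-in for the hardware register */

static void set_droq_pkt_op(uint32_t q_no, int enable)
{
    uint32_t reg_val = oq_pkt_enable_reg;      /* "read the register" */

    if (enable)
        reg_val |= (1U << q_no);               /* turn packet output on  */
    else
        reg_val &= ~(1U << q_no);              /* turn packet output off */

    oq_pkt_enable_reg = reg_val;               /* "write it back" */
}

int main(void)
{
    set_droq_pkt_op(3, 1);
    printf("enable reg after set:   0x%x\n", oq_pkt_enable_reg);
    set_droq_pkt_op(3, 0);
    printf("enable reg after clear: 0x%x\n", oq_pkt_enable_reg);
    return 0;
}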
H A D | cn66xx_regs.h |
    478 #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
    479 (CN6XXX_DPI_DMA_ENG0_ENB + (q_no * 8))
    483 #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
    484 (CN6XXX_DPI_DMA_ENG0_BUF + (q_no * 8))
|
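The CN6XXX_DPI_DMA_ENG_ENB()/..._BUF() macros compute a per-engine register address as a fixed base plus q_no times an 8-byte stride. The sketch below shows the same base-plus-stride arithmetic; the base offsets are placeholders, not the real CN66XX register addresses.

/* Sketch of base-plus-stride register addressing, as in the
 * CN6XXX_DPI_DMA_ENG_ENB()/..._BUF() macros. The base offsets below are
 * placeholders, not the actual CN66XX values. */
#include <stdio.h>
#include <stdint.h>

#define DPI_DMA_ENG0_ENB 0x0080U                     /* hypothetical base */
#define DPI_DMA_ENG0_BUF 0x0880U                     /* hypothetical base */

#define DPI_DMA_ENG_ENB(q_no) (DPI_DMA_ENG0_ENB + ((q_no) * 8))
#define DPI_DMA_ENG_BUF(q_no) (DPI_DMA_ENG0_BUF + ((q_no) * 8))

int main(void)
{
    for (uint32_t q = 0; q < 4; q++)
        printf("eng %u: enb @ 0x%04x, buf @ 0x%04x\n",
               (unsigned)q, DPI_DMA_ENG_ENB(q), DPI_DMA_ENG_BUF(q));
    return 0;
}

One small nit: parenthesizing the macro argument, as done in this sketch, keeps an expression argument such as q + 1 from being scaled incorrectly; the original macros feed q_no into the multiplication unparenthesized.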
H A D | octeon_iq.h |
    271 * @param iq_no - queue to be initialized (0 <= q_no <= 3).
    284 * @param iq_no - queue to be deleted (0 <= q_no <= 3).
|
H A D | octeon_device.h |
    620 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
    622 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
    631 * @param q_no which queue
    634 void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
|
H A D | octeon_nic.c | 94 return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, octnet_send_nic_data_pkt()
|
H A D | lio_ethtool.c |
    857 u32 j, q_no; lio_set_intr_coalesce() local
    862 q_no = lio->linfo.txpciq[j]; lio_set_intr_coalesce()
    863 oct->instr_queue[q_no]->fill_threshold = lio_set_intr_coalesce()
|
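lio_set_intr_coalesce() above fans one ethtool coalesce parameter out to every transmit queue, indexing the instruction queues through linfo.txpciq[]. A small sketch of that fan-out, with hypothetical structures:

/* Sketch of applying an ethtool coalesce setting to every transmit queue's
 * fill_threshold, as lio_set_intr_coalesce() does above. Hypothetical
 * structures, not the driver's own. */
#include <stdint.h>
#include <stdio.h>

#define MAX_TXQS 8

struct instr_queue {
    uint32_t fill_threshold;   /* doorbell is rung once this many commands queue up */
};

static struct instr_queue txq[MAX_TXQS];
static const int txpciq[MAX_TXQS] = { 0, 1, 2, 3, 4, 5, 6, 7 };

static void set_tx_coalesce(uint32_t num_txqs, uint32_t tx_max_coalesced_frames)
{
    for (uint32_t j = 0; j < num_txqs; j++) {
        uint32_t q_no = txpciq[j];             /* map logical index -> pci queue no. */
        txq[q_no].fill_threshold = tx_max_coalesced_frames;
    }
}

int main(void)
{
    set_tx_coalesce(4, 16);
    printf("txq0 fill_threshold = %u\n", txq[0].fill_threshold);
    return 0;
}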
/linux-4.4.14/drivers/scsi/ |
H A D | advansys.c |
    257 #define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6))
    261 uchar q_no; member in struct:asc_scsiq_1
    305 uchar q_no; member in struct:asc_q_done_info
    357 uchar q_no; member in struct:asc_sg_list_q
    6711 scsiq->q_no = (uchar)(_val >> 8); _AscCopyLramScsiDoneQ()
    7964 static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) AscPutReadyQueue() argument
    7986 q_addr = ASC_QNO_TO_QADDR(q_no); AscPutReadyQueue()
    8002 q_no << 8) | (ushort)QS_READY)); AscPutReadyQueue()
    8007 AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) AscPutReadySgListQueue() argument
    8037 q_addr = ASC_QNO_TO_QADDR(q_no); AscPutReadySgListQueue()
    8040 scsi_sg_q.sg_head_qp = q_no; AscPutReadySgListQueue()
    8076 scsi_sg_q.q_no = next_qp; AscPutReadySgListQueue()
    8093 sta = AscPutReadyQueue(asc_dvc, scsiq, q_no); AscPutReadySgListQueue()
    8120 scsiq->q1.q_no = free_q_head; AscSendScsiQueue()
    8127 scsiq->q1.q_no = free_q_head; AscSendScsiQueue()
    8182 scsiq->q1.q_no = 0; AscExeScsiQueue()
|
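The advansys.c hits center on ASC_QNO_TO_QADDR(): each queue owns a 64-byte slot in the adapter's local RAM, so the queue number is shifted left by 6 and added to the ASC_QADR_BEG base before the ready queue is written there. A sketch of that mapping follows; the base value used here is a placeholder, not the driver's actual constant.

/* Sketch of the queue-number-to-LRAM-address mapping used by
 * ASC_QNO_TO_QADDR(): each queue owns a 64-byte slot starting at
 * ASC_QADR_BEG. The base value below is a placeholder. */
#include <stdio.h>

#define ASC_QADR_BEG 0x4000                  /* hypothetical LRAM base */
#define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG) + ((int)(q_no) << 6))

int main(void)
{
    for (int q_no = 0; q_no < 4; q_no++)
        printf("queue %d -> LRAM 0x%04x\n", q_no, (unsigned)ASC_QNO_TO_QADDR(q_no));
    return 0;
}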