Lines Matching refs:iq

60 struct octeon_instr_queue *iq = in IQ_INSTR_MODE_64B() local
62 return iq->iqcmd_64B; in IQ_INSTR_MODE_64B()
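
Lines 60-62 are the body of a small accessor. A minimal sketch of the whole helper follows; the continuation of line 60 (the source line breaks after the `=`) is inferred from the identical expression at line 97:

	static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
	{
		struct octeon_instr_queue *iq =
			oct->instr_queue[iq_no];

		/* Non-zero when this queue was configured for 64-byte
		 * commands (see line 143); zero means 32-byte mode.
		 */
		return iq->iqcmd_64B;
	}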
74 struct octeon_instr_queue *iq; in octeon_init_instr_queue() local
97 iq = oct->instr_queue[iq_no]; in octeon_init_instr_queue()
99 iq->base_addr = lio_dma_alloc(oct, q_size, in octeon_init_instr_queue()
100 (dma_addr_t *)&iq->base_addr_dma); in octeon_init_instr_queue()
101 if (!iq->base_addr) { in octeon_init_instr_queue()
107 iq->max_count = num_descs; in octeon_init_instr_queue()
112 iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs); in octeon_init_instr_queue()
113 if (!iq->request_list) { in octeon_init_instr_queue()
114 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); in octeon_init_instr_queue()
120 memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs); in octeon_init_instr_queue()
123 iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); in octeon_init_instr_queue()
125 iq->iq_no = iq_no; in octeon_init_instr_queue()
126 iq->fill_threshold = (u32)conf->db_min; in octeon_init_instr_queue()
127 iq->fill_cnt = 0; in octeon_init_instr_queue()
128 iq->host_write_index = 0; in octeon_init_instr_queue()
129 iq->octeon_read_index = 0; in octeon_init_instr_queue()
130 iq->flush_index = 0; in octeon_init_instr_queue()
131 iq->last_db_time = 0; in octeon_init_instr_queue()
132 iq->do_auto_flush = 1; in octeon_init_instr_queue()
133 iq->db_timeout = (u32)conf->db_timeout; in octeon_init_instr_queue()
134 atomic_set(&iq->instr_pending, 0); in octeon_init_instr_queue()
137 spin_lock_init(&iq->lock); in octeon_init_instr_queue()
139 oct->io_qmask.iq |= (1 << iq_no); in octeon_init_instr_queue()
143 iq->iqcmd_64B = (conf->instr_type == 64); in octeon_init_instr_queue()
149 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); in octeon_init_instr_queue()
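
Taken together, lines 74-149 outline the queue bring-up sequence: allocate the DMA-coherent command ring, allocate a host-side request_list that shadows each ring slot, zero all indices, and record the 32B/64B command mode. A condensed sketch of that flow, assuming the driver's usual types, a `conf` pointer to the queue configuration, and `q_size` computed as command size times descriptor count (the error paths at lines 114 and 149 free the ring again on failure):

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr)
		return 1;			/* ring allocation failed */

	iq->max_count = num_descs;

	/* One request_list entry per ring slot; it tracks the buffer and
	 * request type posted at that index until Octeon fetches it.
	 */
	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		return 1;
	}
	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	iq->iq_no = iq_no;
	iq->fill_threshold = (u32)conf->db_min;	/* doorbell batching */
	iq->fill_cnt = 0;
	iq->host_write_index = 0;		/* next slot host writes */
	iq->octeon_read_index = 0;		/* last slot Octeon fetched */
	iq->flush_index = 0;			/* next slot host completes */
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);	/* mark queue as live */
	iq->iqcmd_64B = (conf->instr_type == 64);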
168 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in octeon_delete_instr_queue() local
178 vfree(iq->request_list); in octeon_delete_instr_queue()
180 if (iq->base_addr) { in octeon_delete_instr_queue()
181 q_size = iq->max_count * desc_size; in octeon_delete_instr_queue()
182 lio_dma_free(oct, (u32)q_size, iq->base_addr, in octeon_delete_instr_queue()
183 iq->base_addr_dma); in octeon_delete_instr_queue()
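
The teardown at lines 168-183 undoes that in reverse order: free the shadow request_list first, then the DMA ring, whose size is recomputed from max_count and a desc_size presumably derived from the queue's 32B/64B mode. A sketch:

	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
	}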
230 if (!(oct->io_qmask.iq & (1UL << i))) in lio_wait_for_instr_fetch()
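
Line 230 is the queue-mask test inside lio_wait_for_instr_fetch(): the io_qmask.iq bitmap set at line 139 tells the wait loop which queues exist. A plausible shape for that loop, polling instr_pending on every live queue (the loop bound MAX_OCTEON_INSTR_QUEUES is an assumed name, not shown in the listing):

	int instr_cnt = 0, i;

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {	/* assumed bound */
		if (!(oct->io_qmask.iq & (1UL << i)))
			continue;	/* queue was never initialized */
		instr_cnt += atomic_read(&oct->instr_queue[i]->instr_pending);
	}
	/* The caller retries until instr_cnt drops to zero or a retry
	 * budget is exhausted.
	 */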
251 ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) in ring_doorbell() argument
254 writel(iq->fill_cnt, iq->doorbell_reg); in ring_doorbell()
257 iq->fill_cnt = 0; in ring_doorbell()
258 iq->last_db_time = jiffies; in ring_doorbell()
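
ring_doorbell() (lines 251-258) publishes a batch: a single MMIO write of fill_cnt credits the chip with that many new commands, then the batch counter resets and last_db_time is stamped for the timeout logic at line 485. A minimal sketch:

	static inline void ring_doorbell(struct octeon_device *oct,
					 struct octeon_instr_queue *iq)
	{
		/* One 32-bit MMIO write tells the hardware how many new
		 * commands to fetch.
		 */
		writel(iq->fill_cnt, iq->doorbell_reg);

		iq->fill_cnt = 0;		/* start a new batch */
		iq->last_db_time = jiffies;	/* for __check_db_timeout() */
	}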
263 static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, in __copy_cmd_into_iq() argument
268 cmdsize = ((iq->iqcmd_64B) ? 64 : 32); in __copy_cmd_into_iq()
269 iqptr = iq->base_addr + (cmdsize * iq->host_write_index); in __copy_cmd_into_iq()
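
__copy_cmd_into_iq() (lines 263-269) computes the slot address by hand: the command size follows the queue's 32B/64B mode and the destination is base_addr plus cmdsize times host_write_index. Filled out as a sketch; only the address computation appears above, so the final memcpy is inferred:

	static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
					      u8 *cmd)
	{
		u8 *iqptr;
		u32 cmdsize;

		cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
		iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

		memcpy(iqptr, cmd, cmdsize);	/* inferred final step */
	}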
276 struct octeon_instr_queue *iq, in __post_command() argument
284 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) in __post_command()
287 __copy_cmd_into_iq(iq, cmd); in __post_command()
290 index = iq->host_write_index; in __post_command()
291 INCR_INDEX_BY1(iq->host_write_index, iq->max_count); in __post_command()
292 iq->fill_cnt++; in __post_command()
299 atomic_inc(&iq->instr_pending); in __post_command()
306 struct octeon_instr_queue *iq, in __post_command2() argument
316 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) { in __post_command2()
322 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2)) in __post_command2()
325 __copy_cmd_into_iq(iq, cmd); in __post_command2()
328 st.index = iq->host_write_index; in __post_command2()
329 INCR_INDEX_BY1(iq->host_write_index, iq->max_count); in __post_command2()
330 iq->fill_cnt++; in __post_command2()
337 atomic_inc(&iq->instr_pending); in __post_command2()
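
__post_command() and __post_command2() (lines 276-337) share one protocol: refuse when instr_pending would reach max_count - 1 (a completely full ring would make the write index equal the read index and look empty), copy the command in, advance host_write_index with wrap-around, bump fill_cnt, then account the command in instr_pending. __post_command2() additionally reports a stop condition one slot earlier so callers can quiesce before the ring actually fills. A sketch of the second variant; the IQ_SEND_* status names are an assumption, since the listing shows only the index handling:

	st.status = IQ_SEND_OK;			/* assumed status names */

	/* Leave one slot unused so a full ring stays distinguishable
	 * from an empty one.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	/* One slot short of full: post, but tell the caller to stop. */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* The command must be visible in memory before the pending
	 * count admits it exists (a write barrier would sit here).
	 */
	atomic_inc(&iq->instr_pending);

	return st;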
358 __add_to_request_list(struct octeon_instr_queue *iq, in __add_to_request_list() argument
361 iq->request_list[idx].buf = buf; in __add_to_request_list()
362 iq->request_list[idx].reqtype = reqtype; in __add_to_request_list()
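
__add_to_request_list() (lines 358-362) is the bookkeeping half of a post: it parks the caller's buffer and request type in the slot just written, so completion processing at line 378 knows how to free it. In full it is plausibly just:

	static inline void
	__add_to_request_list(struct octeon_instr_queue *iq,
			      int idx, void *buf, int reqtype)
	{
		iq->request_list[idx].buf = buf;
		iq->request_list[idx].reqtype = reqtype;
	}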
367 struct octeon_instr_queue *iq) in lio_process_iq_request_list() argument
371 u32 old = iq->flush_index; in lio_process_iq_request_list()
377 while (old != iq->octeon_read_index) { in lio_process_iq_request_list()
378 reqtype = iq->request_list[old].reqtype; in lio_process_iq_request_list()
379 buf = iq->request_list[old].buf; in lio_process_iq_request_list()
427 iq->request_list[old].buf = NULL; in lio_process_iq_request_list()
428 iq->request_list[old].reqtype = 0; in lio_process_iq_request_list()
432 INCR_INDEX_BY1(old, iq->max_count); in lio_process_iq_request_list()
435 octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, in lio_process_iq_request_list()
437 iq->flush_index = old; in lio_process_iq_request_list()
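
lio_process_iq_request_list() (lines 367-437) walks flush_index forward to octeon_read_index, dispatching each parked request by its reqtype (the per-type switch between lines 379 and 427 is elided in the listing), clearing the slot, and finally reporting totals to BQL. A sketch of the walk, with a hypothetical free_request_buffer() standing in for the elided dispatch:

	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* Everything between flush_index and octeon_read_index has been
	 * fetched by the chip and can be completed on the host.
	 */
	while (old != iq->octeon_read_index) {
		int reqtype = iq->request_list[old].reqtype;
		void *buf   = iq->request_list[old].buf;

		/* Hypothetical stand-in for the per-reqtype dispatch
		 * elided between lines 379 and 427.
		 */
		free_request_buffer(oct, reqtype, buf,
				    &pkts_compl, &bytes_compl);

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);
	}

	octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
					   bytes_compl);
	iq->flush_index = old;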
443 update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq) in update_iq_indices() argument
450 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq); in update_iq_indices()
453 if (iq->flush_index != iq->octeon_read_index) in update_iq_indices()
454 inst_processed = lio_process_iq_request_list(oct, iq); in update_iq_indices()
457 atomic_sub(inst_processed, &iq->instr_pending); in update_iq_indices()
458 iq->stats.instr_processed += inst_processed; in update_iq_indices()
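
update_iq_indices() (lines 443-458) is the glue: ask the chip, via the per-device fn_list hook, how far it has read; complete anything between flush_index and the new read index; then subtract the completed count from instr_pending. A sketch, with the guard on inst_processed assumed:

	static void
	update_iq_indices(struct octeon_device *oct,
			  struct octeon_instr_queue *iq)
	{
		u32 inst_processed = 0;

		/* Hardware-specific hook returns the chip's read pointer. */
		iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);

		if (iq->flush_index != iq->octeon_read_index)
			inst_processed = lio_process_iq_request_list(oct, iq);

		if (inst_processed) {		/* assumed guard */
			atomic_sub(inst_processed, &iq->instr_pending);
			iq->stats.instr_processed += inst_processed;
		}
	}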
463 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, in octeon_flush_iq() argument
466 if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { in octeon_flush_iq()
467 spin_lock_bh(&iq->lock); in octeon_flush_iq()
468 update_iq_indices(oct, iq); in octeon_flush_iq()
469 spin_unlock_bh(&iq->lock); in octeon_flush_iq()
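
octeon_flush_iq() (lines 463-469) only takes the queue lock when at least pending_thresh commands are outstanding, which is why its two callers pass different thresholds: 1 from the timeout path at line 501, 2 from the send path at line 545. In full it is plausibly:

	static void
	octeon_flush_iq(struct octeon_device *oct,
			struct octeon_instr_queue *iq, u32 pending_thresh)
	{
		if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
			spin_lock_bh(&iq->lock);
			update_iq_indices(oct, iq);
			spin_unlock_bh(&iq->lock);
		}
	}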
475 struct octeon_instr_queue *iq; in __check_db_timeout() local
480 iq = oct->instr_queue[iq_no]; in __check_db_timeout()
481 if (!iq) in __check_db_timeout()
485 next_time = iq->last_db_time + iq->db_timeout; in __check_db_timeout()
488 iq->last_db_time = jiffies; in __check_db_timeout()
493 spin_lock_bh(&iq->lock); in __check_db_timeout()
494 if (iq->fill_cnt != 0) in __check_db_timeout()
495 ring_doorbell(oct, iq); in __check_db_timeout()
497 spin_unlock_bh(&iq->lock); in __check_db_timeout()
500 if (iq->do_auto_flush) in __check_db_timeout()
501 octeon_flush_iq(oct, iq, 1); in __check_db_timeout()
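
__check_db_timeout() (lines 475-501) backstops the batching in octeon_send_command(): if a partial batch has sat for longer than db_timeout since the last doorbell, it rings the doorbell anyway and then flushes with the lowest threshold. A sketch, assuming a jiffies-based time_after() comparison for the elided check between lines 485 and 488 (the iq_no parameter type is also assumed):

	static void __check_db_timeout(struct octeon_device *oct,
				       unsigned long iq_no)
	{
		struct octeon_instr_queue *iq;
		u64 next_time;

		if (!oct)
			return;
		iq = oct->instr_queue[iq_no];
		if (!iq)
			return;

		/* Not yet due: the last doorbell was recent enough. */
		next_time = iq->last_db_time + iq->db_timeout;
		if (!time_after(jiffies, (unsigned long)next_time))
			return;
		iq->last_db_time = jiffies;

		/* Flush out any partial batch that missed fill_threshold. */
		spin_lock_bh(&iq->lock);
		if (iq->fill_cnt != 0)
			ring_doorbell(oct, iq);
		spin_unlock_bh(&iq->lock);

		if (iq->do_auto_flush)
			octeon_flush_iq(oct, iq, 1);
	}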
524 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in octeon_send_command() local
526 spin_lock_bh(&iq->lock); in octeon_send_command()
528 st = __post_command2(oct, iq, force_db, cmd); in octeon_send_command()
532 __add_to_request_list(iq, st.index, buf, reqtype); in octeon_send_command()
536 if (iq->fill_cnt >= iq->fill_threshold || force_db) in octeon_send_command()
537 ring_doorbell(oct, iq); in octeon_send_command()
542 spin_unlock_bh(&iq->lock); in octeon_send_command()
544 if (iq->do_auto_flush) in octeon_send_command()
545 octeon_flush_iq(oct, iq, 2); in octeon_send_command()
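
octeon_send_command() (lines 524-545) strings the pieces together under the queue lock: post via __post_command2(), park the buffer at the returned slot, ring the doorbell once the batch reaches fill_threshold (or immediately when force_db is set), and flush outside the lock. A sketch; the failure branch on st.status is assumed, and statistics updates are omitted:

	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	spin_lock_bh(&iq->lock);

	st = __post_command2(oct, iq, force_db, cmd);

	if (st.status != IQ_SEND_FAILED) {	/* assumed guard */
		__add_to_request_list(iq, st.index, buf, reqtype);

		/* Batch doorbells: ring at fill_threshold, or on demand. */
		if (iq->fill_cnt >= iq->fill_threshold || force_db)
			ring_doorbell(oct, iq);
	}

	spin_unlock_bh(&iq->lock);

	/* Reclaim completions outside the lock held by the post path. */
	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 2);

	return st.status;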
603 while (!(oct->io_qmask.iq & (1 << sc->iq_no))) in octeon_prepare_soft_command()
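
Finally, line 603 in octeon_prepare_soft_command() reuses io_qmask.iq as a validity filter: starting from the soft command's current iq_no, it advances to the first queue whose bit is actually set, presumably like:

	/* Skip queue numbers that were never brought up (their bit in
	 * io_qmask.iq is clear); the increment is inferred from context.
	 */
	while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
		sc->iq_no++;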