Lines Matching refs:inst
Cross-reference listing for the identifier `inst` in the TI Keystone Navigator QMSS queue driver (drivers/soc/ti/knav_qmss_queue.c in the Linux kernel). Each entry shows the source line number, the matching line, the enclosing function, and whether `inst` is a function argument or a local variable at that point.
63 #define for_each_handle_rcu(qh, inst) \ argument
64 list_for_each_entry_rcu(qh, &inst->handles, list)
66 #define for_each_instance(idx, inst, kdev) \ argument
67 for (idx = 0, inst = kdev->instances; \
69 idx++, inst = knav_queue_idx_to_inst(kdev, idx))
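
These two helpers drive most of the iteration below. for_each_handle_rcu() walks the RCU-protected list of open handles attached to one queue instance, so callers must hold rcu_read_lock(); for_each_instance() walks every instance the device owns. Line 68, the loop bound of for_each_instance, contains no `inst` reference and is therefore skipped by the matcher (in the mainline driver it is the idx < kdev->num_queues_in_use check). A minimal sketch of the instance scan, modelled on knav_queue_match_id_to_inst() further down the listing; the wrapper name find_inst_by_id is made up for illustration:

    static struct knav_queue_inst *find_inst_by_id(struct knav_device *kdev,
                                                   unsigned int id)
    {
            struct knav_queue_inst *inst;
            int idx;

            /* linear scan over the device's instance table */
            for_each_instance(idx, inst, kdev) {
                    if (inst->id == id)
                            return inst;
            }
            return NULL;
    }
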
82 void knav_queue_notify(struct knav_queue_inst *inst) in knav_queue_notify() argument
86 if (!inst) in knav_queue_notify()
90 for_each_handle_rcu(qh, inst) { in knav_queue_notify()
104 struct knav_queue_inst *inst = _instdata; in knav_queue_int_handler() local
106 knav_queue_notify(inst); in knav_queue_int_handler()
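
Lines 82-106 are the notification fan-out: the hard-IRQ handler does nothing but forward its cookie to knav_queue_notify(), which walks every open handle under the RCU read lock and invokes its callback. A sketch of that shape; the per-handle fields (notifier_enabled, notifier_fn, notifier_fn_arg) are assumed from the mainline driver rather than visible in this listing:

    void knav_queue_notify(struct knav_queue_inst *inst)
    {
            struct knav_queue *qh;

            if (!inst)
                    return;

            rcu_read_lock();
            for_each_handle_rcu(qh, inst) {
                    /* only handles that enabled notification get called */
                    if (atomic_read(&qh->notifier_enabled) <= 0)
                            continue;
                    if (WARN_ON(!qh->notifier_fn))
                            continue;
                    qh->notifier_fn(qh->notifier_fn_arg);
            }
            rcu_read_unlock();
    }

    static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
    {
            knav_queue_notify(_instdata);
            return IRQ_HANDLED;
    }
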
111 struct knav_queue_inst *inst) in knav_queue_setup_irq() argument
113 unsigned queue = inst->id - range->queue_base; in knav_queue_setup_irq()
121 inst->irq_name, inst); in knav_queue_setup_irq()
137 static void knav_queue_free_irq(struct knav_queue_inst *inst) in knav_queue_free_irq() argument
139 struct knav_range_info *range = inst->range; in knav_queue_free_irq()
140 unsigned queue = inst->id - inst->range->queue_base; in knav_queue_free_irq()
146 free_irq(irq, inst); in knav_queue_free_irq()
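
Lines 111-146 pair IRQ setup with teardown. The instance's global queue id is rebased against the owning range's queue_base to index a per-range IRQ table, and the instance pointer itself is the cookie shared by request_irq() and free_irq(). A sketch; the range->irqs[] layout and the RANGE_HAS_IRQ guard around setup are assumptions based on the mainline driver:

    static int knav_queue_setup_irq(struct knav_range_info *range,
                                    struct knav_queue_inst *inst)
    {
            unsigned int queue = inst->id - range->queue_base;
            int irq, ret = 0;

            if (range->flags & RANGE_HAS_IRQ) {
                    irq = range->irqs[queue].irq;
                    ret = request_irq(irq, knav_queue_int_handler, 0,
                                      inst->irq_name, inst);
            }
            return ret;
    }

    static void knav_queue_free_irq(struct knav_queue_inst *inst)
    {
            struct knav_range_info *range = inst->range;
            unsigned int queue = inst->id - range->queue_base;

            if (range->flags & RANGE_HAS_IRQ)
                    free_irq(range->irqs[queue].irq, inst); /* same cookie */
    }
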
150 static inline bool knav_queue_is_busy(struct knav_queue_inst *inst) in knav_queue_is_busy() argument
152 return !list_empty(&inst->handles); in knav_queue_is_busy()
155 static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst) in knav_queue_is_reserved() argument
157 return inst->range->flags & RANGE_RESERVED; in knav_queue_is_reserved()
160 static inline bool knav_queue_is_shared(struct knav_queue_inst *inst) in knav_queue_is_shared() argument
165 for_each_handle_rcu(tmp, inst) { in knav_queue_is_shared()
175 static inline bool knav_queue_match_type(struct knav_queue_inst *inst, in knav_queue_match_type() argument
179 (inst->range->flags & RANGE_HAS_IRQ)) { in knav_queue_match_type()
182 (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { in knav_queue_match_type()
185 !(inst->range->flags & in knav_queue_match_type()
195 struct knav_queue_inst *inst; in knav_queue_match_id_to_inst() local
198 for_each_instance(idx, inst, kdev) { in knav_queue_match_id_to_inst()
199 if (inst->id == id) in knav_queue_match_id_to_inst()
200 return inst; in knav_queue_match_id_to_inst()
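
The predicates at lines 150-165 encode the sharing policy: an instance is busy while any handle is open on it, reserved when its range carries RANGE_RESERVED, and shared when at least one existing handle was opened with KNAV_QUEUE_SHARED. The last check walks the handle list and therefore needs the RCU read lock; a sketch, with the per-handle flags field assumed:

    static bool knav_queue_is_shared(struct knav_queue_inst *inst)
    {
            struct knav_queue *tmp;
            bool ret = false;

            rcu_read_lock();
            for_each_handle_rcu(tmp, inst) {
                    if (tmp->flags & KNAV_QUEUE_SHARED) {
                            ret = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return ret;
    }
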
215 static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst, in __knav_queue_open() argument
222 qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL); in __knav_queue_open()
227 qh->inst = inst; in __knav_queue_open()
228 id = inst->id - inst->qmgr->start_queue; in __knav_queue_open()
229 qh->reg_push = &inst->qmgr->reg_push[id]; in __knav_queue_open()
230 qh->reg_pop = &inst->qmgr->reg_pop[id]; in __knav_queue_open()
231 qh->reg_peek = &inst->qmgr->reg_peek[id]; in __knav_queue_open()
234 if (!knav_queue_is_busy(inst)) { in __knav_queue_open()
235 struct knav_range_info *range = inst->range; in __knav_queue_open()
237 inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); in __knav_queue_open()
239 ret = range->ops->open_queue(range, inst, flags); in __knav_queue_open()
242 devm_kfree(inst->kdev->dev, qh); in __knav_queue_open()
246 list_add_tail_rcu(&qh->list, &inst->handles); in __knav_queue_open()
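
Lines 215-246 are the core of open: allocate a handle, point its push/pop/peek register views into the owning queue manager's register arrays (indexed by inst->id - inst->qmgr->start_queue), and, only for the first opener of an instance, name it and run the range type's open_queue op. The tail of the function, sketched on the assumption that failures are reported via ERR_PTR():

    /* first opener performs the per-instance setup */
    if (!knav_queue_is_busy(inst)) {
            struct knav_range_info *range = inst->range;

            inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
            if (range->ops && range->ops->open_queue)
                    ret = range->ops->open_queue(range, inst, flags);
            if (ret) {
                    devm_kfree(inst->kdev->dev, qh);
                    return ERR_PTR(ret);
            }
    }
    /* publish the handle; notify and the predicates read this list under RCU */
    list_add_tail_rcu(&qh->list, &inst->handles);
    return qh;
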
253 struct knav_queue_inst *inst; in knav_queue_open_by_id() local
259 inst = knav_queue_find_by_id(id); in knav_queue_open_by_id()
260 if (!inst) in knav_queue_open_by_id()
264 if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst)) in knav_queue_open_by_id()
269 (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst))) in knav_queue_open_by_id()
272 qh = __knav_queue_open(inst, name, flags); in knav_queue_open_by_id()
283 struct knav_queue_inst *inst; in knav_queue_open_by_type() local
289 for_each_instance(idx, inst, kdev) { in knav_queue_open_by_type()
290 if (knav_queue_is_reserved(inst)) in knav_queue_open_by_type()
292 if (!knav_queue_match_type(inst, type)) in knav_queue_open_by_type()
294 if (knav_queue_is_busy(inst)) in knav_queue_open_by_type()
296 qh = __knav_queue_open(inst, name, flags); in knav_queue_open_by_type()
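
The two openers implement the allocation policy. Open-by-id fails when the queue is already busy and either side did not ask for KNAV_QUEUE_SHARED; open-by-type scans all instances and takes the first one that is neither reserved, of the wrong type, nor busy. Client drivers reach both through the exported knav_queue_open() wrapper; a hedged usage sketch (the queue names and the queue id 650 are invented for illustration):

    void *qh;

    /* allocate any free general-purpose queue by type */
    qh = knav_queue_open("rx-compl", KNAV_QUEUE_GP, 0);
    if (IS_ERR(qh))
            return PTR_ERR(qh);

    /* or attach to a well-known queue id, tolerating other users */
    qh = knav_queue_open("tx-submit", 650, KNAV_QUEUE_SHARED);
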
305 static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled) in knav_queue_set_notify() argument
307 struct knav_range_info *range = inst->range; in knav_queue_set_notify()
310 range->ops->set_notify(range, inst, enabled); in knav_queue_set_notify()
315 struct knav_queue_inst *inst = qh->inst; in knav_queue_enable_notifier() local
327 first = (atomic_inc_return(&inst->num_notifiers) == 1); in knav_queue_enable_notifier()
329 knav_queue_set_notify(inst, true); in knav_queue_enable_notifier()
336 struct knav_queue_inst *inst = qh->inst; in knav_queue_disable_notifier() local
343 last = (atomic_dec_return(&inst->num_notifiers) == 0); in knav_queue_disable_notifier()
345 knav_queue_set_notify(inst, false); in knav_queue_disable_notifier()
358 if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) in knav_queue_set_notifier()
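
Lines 305-358 show a two-level enable count: each handle keeps its own count, and only transitions of the per-instance num_notifiers counter between 0 and 1 reach the hardware through range->ops->set_notify. Line 358 additionally refuses to install a notifier on a queue whose range can generate no events (neither accumulator- nor IRQ-backed). The first/last detection idiom, sketched:

    /* the first enabler on the instance arms the event source ... */
    first = (atomic_inc_return(&inst->num_notifiers) == 1);
    if (first)
            knav_queue_set_notify(inst, true);

    /* ... and the last disabler disarms it */
    last = (atomic_dec_return(&inst->num_notifiers) == 0);
    if (last)
            knav_queue_set_notify(inst, false);
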
374 struct knav_queue_inst *inst, in knav_gp_set_notify() argument
380 queue = inst->id - range->queue_base; in knav_gp_set_notify()
390 struct knav_queue_inst *inst, unsigned flags) in knav_gp_open_queue() argument
392 return knav_queue_setup_irq(range, inst); in knav_gp_open_queue()
396 struct knav_queue_inst *inst) in knav_gp_close_queue() argument
398 knav_queue_free_irq(inst); in knav_gp_close_queue()
412 struct knav_queue_inst *inst = qh->inst; in knav_queue_get_count() local
415 atomic_read(&inst->desc_count); in knav_queue_get_count()
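
Line 415 is the tail of knav_queue_get_count(): the visible depth of a queue is the entry count the hardware reports through the peek registers plus whatever the accumulator has already pulled into software (inst->desc_count). Reconstructed; the reg_peek[0].entry_count field is an assumption from the mainline driver:

    static u32 knav_queue_get_count(void *qhandle)
    {
            struct knav_queue *qh = qhandle;
            struct knav_queue_inst *inst = qh->inst;

            /* hardware-resident entries + software-buffered entries */
            return readl_relaxed(&qh->reg_peek[0].entry_count) +
                   atomic_read(&inst->desc_count);
    }
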
419 struct knav_queue_inst *inst) in knav_queue_debug_show_instance() argument
421 struct knav_device *kdev = inst->kdev; in knav_queue_debug_show_instance()
424 if (!knav_queue_is_busy(inst)) in knav_queue_debug_show_instance()
428 kdev->base_id + inst->id, inst->name); in knav_queue_debug_show_instance()
429 for_each_handle_rcu(qh, inst) { in knav_queue_debug_show_instance()
448 struct knav_queue_inst *inst; in knav_queue_debug_show() local
455 for_each_instance(idx, inst, kdev) in knav_queue_debug_show()
456 knav_queue_debug_show_instance(s, inst); in knav_queue_debug_show()
495 struct knav_queue_inst *inst = qh->inst; in knav_queue_flush() local
496 unsigned id = inst->id - inst->qmgr->start_queue; in knav_queue_flush()
498 atomic_set(&inst->desc_count, 0); in knav_queue_flush()
499 writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh); in knav_queue_flush()
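
Flush (lines 495-499) resets the software side first, zeroing desc_count, and then writes 0 to the queue's push-side ptr_size_thresh register, which the driver relies on to empty the hardware queue. Note the same id rebase as in open. The listing shows nearly the whole body; only the skeleton around it is filled in:

    static int knav_queue_flush(struct knav_queue *qh)
    {
            struct knav_queue_inst *inst = qh->inst;
            unsigned int id = inst->id - inst->qmgr->start_queue;

            atomic_set(&inst->desc_count, 0);       /* drop buffered entries */
            writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
            return 0;
    }
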
544 struct knav_queue_inst *inst = qh->inst; in knav_queue_close() local
553 if (!knav_queue_is_busy(inst)) { in knav_queue_close()
554 struct knav_range_info *range = inst->range; in knav_queue_close()
557 range->ops->close_queue(range, inst); in knav_queue_close()
559 devm_kfree(inst->kdev->dev, qh); in knav_queue_close()
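
Close (lines 544-559) mirrors open with RCU-safe teardown: the handle is unlinked with list_del_rcu(), and a grace period must pass before its memory is freed, since knav_queue_notify() may still be traversing the list; the last handle out also runs the range's close_queue op. The ordering, sketched; the knav_dev_lock mutex and the explicit synchronize_rcu() are assumed from the mainline driver:

    mutex_lock(&knav_dev_lock);
    list_del_rcu(&qh->list);
    mutex_unlock(&knav_dev_lock);
    synchronize_rcu();              /* no notifier can still see qh */

    if (!knav_queue_is_busy(inst)) {
            struct knav_range_info *range = inst->range;

            if (range->ops && range->ops->close_queue)
                    range->ops->close_queue(range, inst);
    }
    devm_kfree(inst->kdev->dev, qh);
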
580 ret = qh->inst->kdev->base_id + qh->inst->id; in knav_queue_device_control()
647 struct knav_queue_inst *inst = qh->inst; in knav_queue_pop() local
652 if (inst->descs) { in knav_queue_pop()
653 if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) { in knav_queue_pop()
654 atomic_inc(&inst->desc_count); in knav_queue_pop()
657 idx = atomic_inc_return(&inst->desc_head); in knav_queue_pop()
659 val = inst->descs[idx]; in knav_queue_pop()
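
The pop path (lines 647-659) is the most delicate consumer. When a queue is accumulator-backed (inst->descs is non-NULL), entries are claimed lock-free: a consumer first reserves one by decrementing desc_count, undoing the decrement on underflow, and only then takes a unique slot from desc_head, masked into the ring. ACC_DESCS_MASK and the fall-back register pop are assumptions from the mainline driver:

    u32 val;
    unsigned int idx;

    if (inst->descs) {
            /* reserve an entry; back out if the ring is empty */
            if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
                    atomic_inc(&inst->desc_count);
                    return 0;
            }
            /* claim a unique slot, wrapped into the ring */
            idx = atomic_inc_return(&inst->desc_head) & ACC_DESCS_MASK;
            val = inst->descs[idx];
    } else {
            /* non-accumulated queues pop straight from the hardware */
            val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
            if (unlikely(!val))
                    return 0;
    }
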
1633 struct knav_queue_inst *inst, in knav_queue_init_queue() argument
1637 inst->qmgr = knav_find_qmgr(id); in knav_queue_init_queue()
1638 if (!inst->qmgr) in knav_queue_init_queue()
1641 INIT_LIST_HEAD(&inst->handles); in knav_queue_init_queue()
1642 inst->kdev = kdev; in knav_queue_init_queue()
1643 inst->range = range; in knav_queue_init_queue()
1644 inst->irq_num = -1; in knav_queue_init_queue()
1645 inst->id = id; in knav_queue_init_queue()
1647 inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL); in knav_queue_init_queue()
1650 return range->ops->init_queue(range, inst); in knav_queue_init_queue()
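
Finally, lines 1633-1650 initialize one instance at probe time: resolve the owning queue manager from the global id, wire up the back-pointers, and record an IRQ name. The sizeof(irq_name) at line 1647 only works because irq_name is a fixed-size stack buffer local to knav_queue_init_queue(), not a pointer; kstrndup() then bounds the copy to the buffer size. A sketch of the elided prologue, with the "hwqueue-%d" format taken on trust from the mainline driver:

    char irq_name[KNAV_NAME_SIZE];

    id += range->queue_base;        /* range-local index -> global queue id */
    scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
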