Lines matching refs:eqo (references to the event-queue object, struct be_eq_obj *eqo, in the benet driver's be_main.c; each hit shows the source line number, the matching line, and the enclosing function)
1844 static int be_get_new_eqd(struct be_eq_obj *eqo) in be_get_new_eqd() argument
1846 struct be_adapter *adapter = eqo->adapter; in be_get_new_eqd()
1856 aic = &adapter->aic_obj[eqo->idx]; in be_get_new_eqd()
1864 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_get_new_eqd()
1871 for_all_tx_queues_on_eq(adapter, eqo, txo, i) { in be_get_new_eqd()
1906 static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo) in be_get_eq_delay_mult_enc() argument
1908 struct be_adapter *adapter = eqo->adapter; in be_get_eq_delay_mult_enc()
1909 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx]; in be_get_eq_delay_mult_enc()
1921 eqd = be_get_new_eqd(eqo); in be_get_eq_delay_mult_enc()
1941 struct be_eq_obj *eqo; in be_eqd_update() local
1944 for_all_evt_queues(adapter, eqo, i) { in be_eqd_update()
1945 aic = &adapter->aic_obj[eqo->idx]; in be_eqd_update()
1946 eqd = be_get_new_eqd(eqo); in be_eqd_update()
1949 set_eqd[num].eq_id = eqo->q.id; in be_eqd_update()
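The first group of hits, be_get_new_eqd(), be_get_eq_delay_mult_enc() and be_eqd_update(), is the adaptive interrupt-coalescing path: per EQ, the driver samples the RX/TX packet counts of the rings attached to that EQ, derives a new event-queue delay, and either collects the per-EQ delays into a set_eqd array for a firmware update (be_eqd_update()) or folds the delay into the re-arm doorbell as a multiplier encoding (be_get_eq_delay_mult_enc()). A hypothetical, simplified sketch of the rate-to-delay idea follows; the struct and names stand in for the driver's be_aic_obj bookkeeping and are not its real fields, and the constants are arbitrary:

        /* Hypothetical sketch of rate-based EQ-delay selection; not the
         * driver's real be_aic_obj fields or thresholds. */
        struct aic_sample {
                unsigned long prev_jiffies;
                unsigned long prev_pkts;
        };

        static u32 sketch_new_eqd(struct aic_sample *s, unsigned long cur_pkts)
        {
                unsigned long now = jiffies;
                unsigned long pps;

                if (time_before(now, s->prev_jiffies + HZ / 10))
                        return 0;               /* sample at most every ~100ms */

                pps = (cur_pkts - s->prev_pkts) * HZ /
                      max(1UL, now - s->prev_jiffies);
                s->prev_jiffies = now;
                s->prev_pkts = cur_pkts;

                /* higher packet rate -> longer coalescing delay, capped */
                return min_t(unsigned long, pps / 20000, 96);
        }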
2433 static inline int events_get(struct be_eq_obj *eqo) in events_get() argument
2439 eqe = queue_tail_node(&eqo->q); in events_get()
2446 queue_tail_inc(&eqo->q); in events_get()
2453 static void be_eq_clean(struct be_eq_obj *eqo) in be_eq_clean() argument
2455 int num = events_get(eqo); in be_eq_clean()
2457 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); in be_eq_clean()
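events_get() walks the EQ ring consuming valid entries, and be_eq_clean() uses that count to acknowledge outstanding events through be_eq_notify() without re-arming the queue. A rough sketch of the consume-and-count loop, using an illustrative ring layout; like the driver, it zeroes each entry after consuming it so the loop stops at the first not-yet-valid entry:

        /* Illustrative EQ-drain loop; types and layout are stand-ins. */
        struct sketch_eqe { u32 dw; };          /* bit 0 = valid (assumed) */
        struct sketch_eq  { struct sketch_eqe *ring; u16 tail, len; };

        static int sketch_events_get(struct sketch_eq *eq)
        {
                int num = 0;

                while (eq->ring[eq->tail].dw & 1) {     /* still valid? */
                        eq->ring[eq->tail].dw = 0;      /* consume entry */
                        eq->tail = (eq->tail + 1) % eq->len;
                        num++;
                }
                return num;     /* count handed back in the EQ doorbell */
        }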
2584 struct be_eq_obj *eqo; in be_evt_queues_destroy() local
2587 for_all_evt_queues(adapter, eqo, i) { in be_evt_queues_destroy()
2588 if (eqo->q.created) { in be_evt_queues_destroy()
2589 be_eq_clean(eqo); in be_evt_queues_destroy()
2590 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); in be_evt_queues_destroy()
2591 napi_hash_del(&eqo->napi); in be_evt_queues_destroy()
2592 netif_napi_del(&eqo->napi); in be_evt_queues_destroy()
2593 free_cpumask_var(eqo->affinity_mask); in be_evt_queues_destroy()
2595 be_queue_free(adapter, &eqo->q); in be_evt_queues_destroy()
2602 struct be_eq_obj *eqo; in be_evt_queues_create() local
2609 for_all_evt_queues(adapter, eqo, i) { in be_evt_queues_create()
2613 eqo->adapter = adapter; in be_evt_queues_create()
2614 eqo->idx = i; in be_evt_queues_create()
2618 eq = &eqo->q; in be_evt_queues_create()
2624 rc = be_cmd_eq_create(adapter, eqo); in be_evt_queues_create()
2628 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) in be_evt_queues_create()
2631 eqo->affinity_mask); in be_evt_queues_create()
2632 netif_napi_add(adapter->netdev, &eqo->napi, be_poll, in be_evt_queues_create()
2634 napi_hash_add(&eqo->napi); in be_evt_queues_create()
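be_evt_queues_create() and be_evt_queues_destroy() bracket the EQ lifetime: allocate the ring, issue the EQ-create firmware command, allocate a per-EQ affinity mask and register a NAPI context, then undo all of it in reverse order on destroy. The explicit napi_hash_add()/napi_hash_del() calls suggest a kernel vintage in which busy-poll hashing was not yet folded into netif_napi_add()/netif_napi_del(). A minimal sketch of just the NAPI/affinity half, assuming the pre-6.1 netif_napi_add() signature that still takes a weight (NAPI_POLL_WEIGHT stands in for the driver's own weight constant):

        /* Sketch of per-EQ NAPI setup/teardown; helper names are illustrative. */
        static int sketch_eq_napi_setup(struct net_device *netdev,
                                        struct be_eq_obj *eqo)
        {
                if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
                        return -ENOMEM;

                netif_napi_add(netdev, &eqo->napi, be_poll, NAPI_POLL_WEIGHT);
                return 0;
        }

        static void sketch_eq_napi_teardown(struct be_eq_obj *eqo)
        {
                netif_napi_del(&eqo->napi);
                free_cpumask_var(eqo->affinity_mask);
        }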
2710 struct be_eq_obj *eqo; in be_tx_qs_create() local
2728 eqo = &adapter->eq_obj[i % adapter->num_evt_qs]; in be_tx_qs_create()
2729 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3); in be_tx_qs_create()
2742 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask, in be_tx_qs_create()
2743 eqo->idx); in be_tx_qs_create()
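be_tx_qs_create() binds each TX completion queue to an event queue round-robin (i % adapter->num_evt_qs) and then mirrors that EQ's CPU mask into XPS, so transmit-queue selection lands on the CPUs that will also service the completions. A sketch of only that mapping step; the queue creation and error handling of the real function are omitted, and the helper name is hypothetical:

        /* Illustrative: spread tx queues across the EQs round-robin and
         * mirror each EQ's CPU mask into XPS for that tx queue. */
        static void sketch_map_txqs_to_eqs(struct be_adapter *adapter,
                                           int num_tx_qs)
        {
                int i;

                for (i = 0; i < num_tx_qs; i++) {
                        struct be_eq_obj *eqo =
                                &adapter->eq_obj[i % adapter->num_evt_qs];

                        netif_set_xps_queue(adapter->netdev,
                                            eqo->affinity_mask, i);
                }
        }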
2809 struct be_eq_obj *eqo = dev; in be_intx() local
2810 struct be_adapter *adapter = eqo->adapter; in be_intx()
2821 if (napi_schedule_prep(&eqo->napi)) { in be_intx()
2822 num_evts = events_get(eqo); in be_intx()
2823 __napi_schedule(&eqo->napi); in be_intx()
2825 eqo->spurious_intr = 0; in be_intx()
2827 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0); in be_intx()
2833 if (num_evts || eqo->spurious_intr++ == 0) in be_intx()
2841 struct be_eq_obj *eqo = dev; in be_msix() local
2843 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0); in be_msix()
2844 napi_schedule(&eqo->napi); in be_msix()
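be_intx() and be_msix() are the interrupt entry points. Both defer all real work to NAPI; be_intx() additionally counts events so it can tell a genuinely shared INTx line from a spurious one via eqo->spurious_intr. The MSI-X path reduces to two calls, shown here as a sketch assembled from the lines listed above:

        /* Sketch of the MSI-X fast path: ack events without re-arming the
         * EQ, then let NAPI do the work. */
        static irqreturn_t sketch_msix_handler(int irq, void *dev)
        {
                struct be_eq_obj *eqo = dev;

                be_eq_notify(eqo->adapter, eqo->q.id, false /* arm */,
                             true /* clear int */, 0, 0);
                napi_schedule(&eqo->napi);
                return IRQ_HANDLED;
        }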
2986 static inline bool be_lock_napi(struct be_eq_obj *eqo) in be_lock_napi() argument
2990 spin_lock(&eqo->lock); /* BH is already disabled */ in be_lock_napi()
2991 if (eqo->state & BE_EQ_LOCKED) { in be_lock_napi()
2992 WARN_ON(eqo->state & BE_EQ_NAPI); in be_lock_napi()
2993 eqo->state |= BE_EQ_NAPI_YIELD; in be_lock_napi()
2996 eqo->state = BE_EQ_NAPI; in be_lock_napi()
2998 spin_unlock(&eqo->lock); in be_lock_napi()
3002 static inline void be_unlock_napi(struct be_eq_obj *eqo) in be_unlock_napi() argument
3004 spin_lock(&eqo->lock); /* BH is already disabled */ in be_unlock_napi()
3006 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); in be_unlock_napi()
3007 eqo->state = BE_EQ_IDLE; in be_unlock_napi()
3009 spin_unlock(&eqo->lock); in be_unlock_napi()
3012 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) in be_lock_busy_poll() argument
3016 spin_lock_bh(&eqo->lock); in be_lock_busy_poll()
3017 if (eqo->state & BE_EQ_LOCKED) { in be_lock_busy_poll()
3018 eqo->state |= BE_EQ_POLL_YIELD; in be_lock_busy_poll()
3021 eqo->state |= BE_EQ_POLL; in be_lock_busy_poll()
3023 spin_unlock_bh(&eqo->lock); in be_lock_busy_poll()
3027 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) in be_unlock_busy_poll() argument
3029 spin_lock_bh(&eqo->lock); in be_unlock_busy_poll()
3031 WARN_ON(eqo->state & (BE_EQ_NAPI)); in be_unlock_busy_poll()
3032 eqo->state = BE_EQ_IDLE; in be_unlock_busy_poll()
3034 spin_unlock_bh(&eqo->lock); in be_unlock_busy_poll()
3037 static inline void be_enable_busy_poll(struct be_eq_obj *eqo) in be_enable_busy_poll() argument
3039 spin_lock_init(&eqo->lock); in be_enable_busy_poll()
3040 eqo->state = BE_EQ_IDLE; in be_enable_busy_poll()
3043 static inline void be_disable_busy_poll(struct be_eq_obj *eqo) in be_disable_busy_poll() argument
3050 while (!be_lock_napi(eqo)) in be_disable_busy_poll()
3058 static inline bool be_lock_napi(struct be_eq_obj *eqo) in be_lock_napi() argument
3063 static inline void be_unlock_napi(struct be_eq_obj *eqo) in be_unlock_napi() argument
3067 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) in be_lock_busy_poll() argument
3072 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) in be_unlock_busy_poll() argument
3076 static inline void be_enable_busy_poll(struct be_eq_obj *eqo) in be_enable_busy_poll() argument
3080 static inline void be_disable_busy_poll(struct be_eq_obj *eqo) in be_disable_busy_poll() argument
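The be_lock_napi()/be_unlock_napi() and be_lock_busy_poll()/be_unlock_busy_poll() pairs arbitrate between the NAPI softirq and a busy-polling socket: whichever side takes eqo->lock first sets eqo->state (BE_EQ_NAPI or BE_EQ_POLL), the loser records a *_YIELD flag and backs off, and be_disable_busy_poll() spins until it can take the NAPI lock so no poller is left inside the rings. The second block of hits is the !CONFIG_NET_RX_BUSY_POLL stubs, which always grant the lock. The NAPI-side handshake, compressed into one sketch with the same field and flag names:

        /* Sketch of the NAPI-side lock: fail if a busy-poller already owns
         * the EQ, recording that NAPI yielded so the poller can re-arm. */
        static inline bool sketch_lock_napi(struct be_eq_obj *eqo)
        {
                bool ok = true;

                spin_lock(&eqo->lock);          /* BH already disabled */
                if (eqo->state & BE_EQ_LOCKED) {
                        eqo->state |= BE_EQ_NAPI_YIELD;
                        ok = false;
                } else {
                        eqo->state = BE_EQ_NAPI;
                }
                spin_unlock(&eqo->lock);
                return ok;
        }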
3087 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); in be_poll() local
3088 struct be_adapter *adapter = eqo->adapter; in be_poll()
3094 num_evts = events_get(eqo); in be_poll()
3096 for_all_tx_queues_on_eq(adapter, eqo, txo, i) in be_poll()
3099 if (be_lock_napi(eqo)) { in be_poll()
3104 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_poll()
3108 be_unlock_napi(eqo); in be_poll()
3113 if (is_mcc_eqo(eqo)) in be_poll()
3123 mult_enc = be_get_eq_delay_mult_enc(eqo); in be_poll()
3125 be_eq_notify(adapter, eqo->q.id, true, false, num_evts, in be_poll()
3129 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0); in be_poll()
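be_poll() ties the pieces together: count pending events, service the TX completions and, if the NAPI lock is obtained, the RX rings hanging off this EQ, handle MCC completions when this is the MCC EQ (is_mcc_eqo()), and finally either complete NAPI and re-arm the EQ with the freshly computed delay multiplier, or leave it un-armed and stay scheduled because the budget ran out. A sketch of only that final re-arm decision, following the standard NAPI contract; the helper name is hypothetical:

        /* Sketch of the re-arm decision at the tail of a NAPI poll. */
        static int sketch_poll_tail(struct be_eq_obj *eqo, int work,
                                    int budget, int num_evts)
        {
                struct be_adapter *adapter = eqo->adapter;

                if (work < budget) {
                        napi_complete(&eqo->napi);
                        /* arm = true: the next event raises an interrupt */
                        be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
                                     be_get_eq_delay_mult_enc(eqo));
                        return work;
                }

                /* budget exhausted: ack events but leave the EQ un-armed */
                be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
                return budget;
        }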
3137 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); in be_busy_poll() local
3138 struct be_adapter *adapter = eqo->adapter; in be_busy_poll()
3142 if (!be_lock_busy_poll(eqo)) in be_busy_poll()
3145 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_busy_poll()
3151 be_unlock_busy_poll(eqo); in be_busy_poll()
3279 struct be_eq_obj *eqo) in be_msix_vec_get() argument
3281 return adapter->msix_entries[eqo->msix_idx].vector; in be_msix_vec_get()
3287 struct be_eq_obj *eqo; in be_msix_register() local
3290 for_all_evt_queues(adapter, eqo, i) { in be_msix_register()
3291 sprintf(eqo->desc, "%s-q%d", netdev->name, i); in be_msix_register()
3292 vec = be_msix_vec_get(adapter, eqo); in be_msix_register()
3293 status = request_irq(vec, be_msix, 0, eqo->desc, eqo); in be_msix_register()
3297 irq_set_affinity_hint(vec, eqo->affinity_mask); in be_msix_register()
3303 eqo = &adapter->eq_obj[i]; in be_msix_register()
3304 free_irq(be_msix_vec_get(adapter, eqo), eqo); in be_msix_register()
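be_msix_vec_get() maps an eqo to its MSI-X vector, and be_msix_register() requests one IRQ per event queue, names it after the netdev and queue index, passes the eqo itself as dev_id, and hints the vector's affinity at the EQ's CPU mask; on failure it unwinds the vectors already requested. A sketch of that request-and-rollback shape with the same kernel APIs (clearing the hint during rollback is an addition here; the listed code clears hints in be_irq_unregister()):

        /* Sketch of per-EQ vector registration with rollback on failure. */
        static int sketch_msix_register(struct be_adapter *adapter)
        {
                struct be_eq_obj *eqo;
                int status, i, vec;

                for_all_evt_queues(adapter, eqo, i) {
                        snprintf(eqo->desc, sizeof(eqo->desc), "%s-q%d",
                                 adapter->netdev->name, i);
                        vec = be_msix_vec_get(adapter, eqo);
                        status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
                        if (status)
                                goto err;
                        irq_set_affinity_hint(vec, eqo->affinity_mask);
                }
                return 0;
        err:
                while (--i >= 0) {
                        eqo = &adapter->eq_obj[i];
                        vec = be_msix_vec_get(adapter, eqo);
                        irq_set_affinity_hint(vec, NULL);
                        free_irq(vec, eqo);
                }
                return status;
        }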
3343 struct be_eq_obj *eqo; in be_irq_unregister() local
3356 for_all_evt_queues(adapter, eqo, i) { in be_irq_unregister()
3357 vec = be_msix_vec_get(adapter, eqo); in be_irq_unregister()
3359 free_irq(vec, eqo); in be_irq_unregister()
3426 struct be_eq_obj *eqo; in be_close() local
3438 for_all_evt_queues(adapter, eqo, i) { in be_close()
3439 napi_disable(&eqo->napi); in be_close()
3440 be_disable_busy_poll(eqo); in be_close()
3455 for_all_evt_queues(adapter, eqo, i) { in be_close()
3457 synchronize_irq(be_msix_vec_get(adapter, eqo)); in be_close()
3460 be_eq_clean(eqo); in be_close()
3566 struct be_eq_obj *eqo; in be_open() local
3592 for_all_evt_queues(adapter, eqo, i) { in be_open()
3593 napi_enable(&eqo->napi); in be_open()
3594 be_enable_busy_poll(eqo); in be_open()
3595 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0); in be_open()
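be_close() and be_open() show the ordering the EQs require: on close, NAPI and busy polling are disabled first so nothing new starts, then each vector is synchronized and the EQ drained with be_eq_clean(); on open, NAPI is enabled before the EQ is armed (arm and clear both true) so the first interrupt already finds a live poller. A per-EQ sketch of both directions; the synchronize_irq() call assumes MSI-X, as noted in the comment:

        /* Sketch of the per-EQ open/close ordering. */
        static void sketch_eq_open(struct be_adapter *adapter,
                                   struct be_eq_obj *eqo)
        {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
                /* arm = true, clear = true: start with an empty, armed EQ */
                be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
        }

        static void sketch_eq_close(struct be_adapter *adapter,
                                    struct be_eq_obj *eqo)
        {
                napi_disable(&eqo->napi);
                be_disable_busy_poll(eqo);
                /* assumption: MSI-X is in use, so sync the per-EQ vector */
                synchronize_irq(be_msix_vec_get(adapter, eqo));
                be_eq_clean(eqo);
        }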
4483 struct be_eq_obj *eqo; in be_netpoll() local
4486 for_all_evt_queues(adapter, eqo, i) { in be_netpoll()
4487 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0); in be_netpoll()
4488 napi_schedule(&eqo->napi); in be_netpoll()