Lines matching refs:tx (each entry gives the source line number followed by the matching code, grouped under its enclosing definition)

3014	#define call_tx_stop_cbfn(tx)					\
3016		if ((tx)->stop_cbfn) {					\
3019			cbfn = (tx)->stop_cbfn;				\
3020			cbarg = (tx)->stop_cbarg;			\
3021			(tx)->stop_cbfn = NULL;				\
3022			(tx)->stop_cbarg = NULL;			\
3023			cbfn(cbarg, (tx));				\

3027	#define call_tx_prio_change_cbfn(tx)				\
3029		if ((tx)->prio_change_cbfn) {				\
3031			cbfn = (tx)->prio_change_cbfn;			\
3032			(tx)->prio_change_cbfn = NULL;			\
3033			cbfn((tx)->bna->bnad, (tx));			\
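The listing shows only the macro lines that reference tx; the do { } while (0) wrapper and the local declarations for cbfn/cbarg fall between the matched lines. A self-contained sketch of the same capture-then-clear, one-shot callback idiom (demo_tx and the callback types are invented for illustration, not the driver's):

#include <stdio.h>

struct demo_tx {
	/* one-shot callback slot, mirroring stop_cbfn/stop_cbarg above */
	void (*stop_cbfn)(void *arg, struct demo_tx *tx);
	void *stop_cbarg;
};

/*
 * Capture the callback and its argument into locals, clear the slots,
 * then invoke the copy: the callback fires at most once, even if it
 * re-enters the state machine or re-arms the slot.
 */
#define call_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct demo_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

static void on_stopped(void *arg, struct demo_tx *tx)
{
	(void)tx;
	printf("stopped, arg=%s\n", (const char *)arg);
}

int main(void)
{
	struct demo_tx tx = { .stop_cbfn = on_stopped,
			      .stop_cbarg = "tx_mod" };

	call_stop_cbfn(&tx);	/* fires exactly once */
	call_stop_cbfn(&tx);	/* slot already NULL: no-op */
	return 0;
}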
3037	static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
3038	static void bna_bfi_tx_enet_start(struct bna_tx *tx);
3039	static void bna_tx_enet_stop(struct bna_tx *tx);

3067	bna_tx_sm_stopped_entry(struct bna_tx *tx)
3069		call_tx_stop_cbfn(tx);

3073	bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3077		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3081		call_tx_stop_cbfn(tx);
3089		call_tx_prio_change_cbfn(tx);
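bfa_fsm_set_state() and bfa_fsm_send_event() never match refs:tx themselves, so the listing never shows how the state machine is driven. Line 3852 below compares tx->fsm against a state function, so the state is literally a function pointer; a transition stores the new handler and runs its entry action. A minimal standalone sketch of that pattern, assuming this behavior (all names here are illustrative, not the driver's):

#include <stdio.h>

enum tx_event { TX_E_START, TX_E_STOP };

struct tx;
typedef void (*tx_state_fn)(struct tx *tx, enum tx_event event);

struct tx {
	tx_state_fn fsm;	/* current state == current handler */
};

/* Transition: record the new state, then run its entry action. */
#define fsm_set_state(obj, state)					\
do {									\
	(obj)->fsm = state;						\
	state##_entry(obj);						\
} while (0)

/* Dispatch: events simply call the current state function. */
#define fsm_send_event(obj, event)	((obj)->fsm((obj), (event)))

static void tx_sm_stopped(struct tx *tx, enum tx_event event);
static void tx_sm_started(struct tx *tx, enum tx_event event);

static void tx_sm_stopped_entry(struct tx *tx) { (void)tx; printf("-> stopped\n"); }
static void tx_sm_started_entry(struct tx *tx) { (void)tx; printf("-> started\n"); }

static void tx_sm_stopped(struct tx *tx, enum tx_event event)
{
	if (event == TX_E_START)
		fsm_set_state(tx, tx_sm_started);
}

static void tx_sm_started(struct tx *tx, enum tx_event event)
{
	if (event == TX_E_STOP)
		fsm_set_state(tx, tx_sm_stopped);
}

int main(void)
{
	struct tx tx;

	fsm_set_state(&tx, tx_sm_stopped);
	fsm_send_event(&tx, TX_E_START);
	fsm_send_event(&tx, TX_E_STOP);
	return 0;
}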
3102	bna_tx_sm_start_wait_entry(struct bna_tx *tx)
3104		bna_bfi_tx_enet_start(tx);

3108	bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
3112		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3113		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3117		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3118		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3122		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3123			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3125			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3127			bfa_fsm_set_state(tx, bna_tx_sm_started);
3131		tx->flags |= BNA_TX_F_PRIO_CHANGED;
3135		tx->flags |= BNA_TX_F_BW_UPDATED;
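Lines 3112-3135 show how start_wait copes with reconfiguration arriving while a start request is in flight: priority and bandwidth events are only latched into BNA_TX_F_PRIO_CHANGED/BNA_TX_F_BW_UPDATED, and the choice between started and prio_stop_wait is made once the firmware ack arrives. The same latch, reduced to a runnable toy (names invented for illustration):

#include <stdio.h>

#define F_PRIO_CHANGED	(1u << 0)
#define F_BW_UPDATED	(1u << 1)

/* Called when the firmware acks the start request (TX_E_STARTED). */
static const char *on_started(unsigned int *flags)
{
	if (*flags & (F_PRIO_CHANGED | F_BW_UPDATED)) {
		/* something changed mid-start: quiesce and reconfigure */
		*flags &= ~(F_PRIO_CHANGED | F_BW_UPDATED);
		return "prio_stop_wait";
	}
	return "started";
}

int main(void)
{
	unsigned int flags = 0;

	flags |= F_BW_UPDATED;	/* a bandwidth update during start_wait */
	printf("next state: %s\n", on_started(&flags));	/* prio_stop_wait */
	printf("next state: %s\n", on_started(&flags));	/* started */
	return 0;
}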
3144	bna_tx_sm_started_entry(struct bna_tx *tx)
3148		int is_regular = (tx->type == BNA_TX_T_REGULAR);
3150		list_for_each(qe, &tx->txq_q) {
3154			bna_ib_start(tx->bna, &txq->ib, is_regular);
3156		tx->tx_resume_cbfn(tx->bna->bnad, tx);

3160	bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3164		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3165		tx->tx_stall_cbfn(tx->bna->bnad, tx);
3166		bna_tx_enet_stop(tx);
3170		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3171		tx->tx_stall_cbfn(tx->bna->bnad, tx);
3172		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3177		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);

3186	bna_tx_sm_stop_wait_entry(struct bna_tx *tx)

3191	bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3196		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3197		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3205		bna_tx_enet_stop(tx);

3219	bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)

3224	bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3234		bfa_fsm_set_state(tx, bna_tx_sm_stopped);

3243	bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3245		tx->tx_stall_cbfn(tx->bna->bnad, tx);
3246		bna_tx_enet_stop(tx);

3250	bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3254		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3258		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3259		call_tx_prio_change_cbfn(tx);
3260		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3264		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);

3278	bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3280		call_tx_prio_change_cbfn(tx);
3281		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);

3285	bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3289		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3293		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3302		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);

3311	bna_tx_sm_failed_entry(struct bna_tx *tx)

3316	bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3320		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3324		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3332		bfa_fsm_set_state(tx, bna_tx_sm_stopped);

3341	bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)

3346	bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3350		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3354		bfa_fsm_set_state(tx, bna_tx_sm_failed);
3358		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
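Collecting the bfa_fsm_set_state() calls matched above gives the Tx state graph, at least as far as the listed lines show it:

	stopped            -> start_wait
	start_wait         -> stop_wait | stopped | prio_stop_wait | started
	started            -> stop_wait | failed | prio_stop_wait
	stop_wait          -> cleanup_wait
	cleanup_wait       -> stopped
	prio_stop_wait     -> stop_wait | failed | prio_cleanup_wait
	prio_cleanup_wait  -> cleanup_wait | failed | start_wait
	failed             -> quiesce_wait | cleanup_wait | stopped
	quiesce_wait       -> cleanup_wait | failed | start_wait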
3371	bna_bfi_tx_enet_start(struct bna_tx *tx)
3373		struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3379			BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3383		cfg_req->num_queues = tx->num_txq;
3384		for (i = 0, qe = bfa_q_first(&tx->txq_q);
3385		     i < tx->num_txq;
3413		cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3417		bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3419		bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
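bna_bfi_tx_enet_start() fills a bfi_enet_tx_cfg_req in the tx object, then hands it to the firmware message queue via bfa_msgq_cmd_set()/bfa_msgq_cmd_post(); the ack arrives asynchronously through bna_bfi_tx_enet_start_rsp() (line 3584 below). A toy version of that describe-then-post split, with stand-in types rather than the real bfi/bfa API:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the bfi request and msgq command. */
struct cfg_req {
	unsigned char msg_id;
	unsigned int  rid;
	unsigned int  num_queues;
};

struct msgq_cmd {
	const void *msg;	/* request lives in the owning object */
	size_t	    msg_size;
};

/* Mirror of the cmd_set/cmd_post split: describe, then enqueue. */
static void msgq_cmd_set(struct msgq_cmd *cmd, const void *msg, size_t size)
{
	cmd->msg = msg;
	cmd->msg_size = size;
}

static void msgq_cmd_post(const struct msgq_cmd *cmd)
{
	/* in the driver this copies the message into the firmware queue;
	 * the completion is delivered later to a *_rsp handler */
	printf("posting %zu-byte command\n", cmd->msg_size);
}

int main(void)
{
	struct cfg_req req;
	struct msgq_cmd cmd;

	memset(&req, 0, sizeof(req));
	req.msg_id = 1;		/* e.g. a TX_CFG_SET-style request */
	req.rid = 0;
	req.num_queues = 1;

	msgq_cmd_set(&cmd, &req, sizeof(req));
	msgq_cmd_post(&cmd);
	return 0;
}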
3423	bna_bfi_tx_enet_stop(struct bna_tx *tx)
3425		struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3428			BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3431		bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3433		bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);

3437	bna_tx_enet_stop(struct bna_tx *tx)
3443		list_for_each(qe, &tx->txq_q) {
3445			bna_ib_stop(tx->bna, &txq->ib);
3448		bna_bfi_tx_enet_stop(tx);

In bna_tx_get():
3491		struct bna_tx *tx = NULL;
3500		tx = (struct bna_tx *)qe;
3501		bfa_q_qe_init(&tx->qe);
3502		tx->type = type;
3504		return tx;

3508	bna_tx_free(struct bna_tx *tx)
3510		struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3515		while (!list_empty(&tx->txq_q)) {
3516			bfa_q_deq(&tx->txq_q, &txq);
3519			txq->tx = NULL;
3524			if (qe == &tx->qe) {
3525				list_del(&tx->qe);
3526				bfa_q_qe_init(&tx->qe);
3531		tx->bna = NULL;
3532		tx->priv = NULL;
3536		if (((struct bna_tx *)qe)->rid < tx->rid)
3545			bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3548			list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3551			bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3552			bfa_q_prev(&tx->qe) = prev_qe;
3553			bfa_q_next(prev_qe) = &tx->qe;
3554			bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
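bna_tx_free() does not simply append to the free list: lines 3536-3554 scan tx_free_q for the first element with a larger rid and splice the tx in ahead of it, keeping the list sorted so that bna_tx_get() always hands out the lowest free rid first. The same insertion written against a plain circular doubly-linked list (illustrative, not the bfa_q implementation):

#include <stdio.h>

struct node {
	int rid;
	struct node *next, *prev;
};

/* Insert n immediately after pos in a circular doubly-linked list. */
static void insert_after(struct node *pos, struct node *n)
{
	n->next = pos->next;
	n->prev = pos;
	pos->next->prev = n;
	pos->next = n;
}

/* Keep the free list sorted by rid, smallest first. */
static void free_list_put(struct node *head, struct node *n)
{
	struct node *prev = head;

	/* advance past existing entries with smaller rids */
	while (prev->next != head && prev->next->rid < n->rid)
		prev = prev->next;
	insert_after(prev, n);
}

int main(void)
{
	struct node head = { -1, &head, &head };
	struct node a = { 2 }, b = { 0 }, c = { 1 };
	struct node *it;

	free_list_put(&head, &a);
	free_list_put(&head, &b);
	free_list_put(&head, &c);
	for (it = head.next; it != &head; it = it->next)
		printf("rid %d\n", it->rid);	/* prints 0, 1, 2 */
	return 0;
}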
3559	bna_tx_start(struct bna_tx *tx)
3561		tx->flags |= BNA_TX_F_ENET_STARTED;
3562		if (tx->flags & BNA_TX_F_ENABLED)
3563			bfa_fsm_send_event(tx, TX_E_START);

3567	bna_tx_stop(struct bna_tx *tx)
3569		tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3570		tx->stop_cbarg = &tx->bna->tx_mod;
3572		tx->flags &= ~BNA_TX_F_ENET_STARTED;
3573		bfa_fsm_send_event(tx, TX_E_STOP);

3577	bna_tx_fail(struct bna_tx *tx)
3579		tx->flags &= ~BNA_TX_F_ENET_STARTED;
3580		bfa_fsm_send_event(tx, TX_E_FAIL);

3584	bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3586		struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3591		bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3594		tx->hw_id = cfg_rsp->hw_id;
3596		for (i = 0, qe = bfa_q_first(&tx->txq_q);
3597		     i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3602				tx->bna->pcidev.pci_bar_kva
3605				tx->bna->pcidev.pci_bar_kva
3614		bfa_fsm_send_event(tx, TX_E_STARTED);

3618	bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3620		bfa_fsm_send_event(tx, TX_E_STOPPED);

In bna_bfi_bw_update_aen():
3626		struct bna_tx *tx;
3630		tx = (struct bna_tx *)qe;
3631		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
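Per the listing context, lines 3626-3631 sit in bna_bfi_bw_update_aen(), which walks the active Tx list and fires TX_E_BW_UPDATE at each object's FSM; each state then decides whether to latch, quiesce, or ignore the event. The walk-and-dispatch shape as a standalone toy (singly linked here for brevity):

#include <stdio.h>

enum tx_event { TX_E_BW_UPDATE };

struct tx {
	struct tx *next;			/* active-list linkage */
	void (*fsm)(struct tx *, enum tx_event);
};

static void sm_started(struct tx *t, enum tx_event e)
{
	if (e == TX_E_BW_UPDATE)
		printf("tx %p: quiesce, then restart with new bandwidth\n",
		       (void *)t);
}

/* Fan one async event out to every active Tx object. */
static void bw_update_aen(struct tx *active_list)
{
	struct tx *t;

	for (t = active_list; t; t = t->next)
		t->fsm(t, TX_E_BW_UPDATE);
}

int main(void)
{
	struct tx b = { NULL, sm_started };
	struct tx a = { &b, sm_started };

	bw_update_aen(&a);
	return 0;
}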
In bna_tx_create():
3690		struct bna_tx *tx;
3709		tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3710		if (!tx)
3712		tx->bna = bna;
3713		tx->priv = priv;
3717		INIT_LIST_HEAD(&tx->txq_q);
3724			list_add_tail(&txq->qe, &tx->txq_q);
3725			txq->tx = tx;
3734		tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3735		tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3737		tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3738		tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3739		tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3741		list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3743		tx->num_txq = tx_cfg->num_txq;
3745		tx->flags = 0;
3746		if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3747			switch (tx->type) {
3749				if (!(tx->bna->tx_mod.flags &
3751					tx->flags |= BNA_TX_F_ENET_STARTED;
3754				if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3755					tx->flags |= BNA_TX_F_ENET_STARTED;
3763		list_for_each(qe, &tx->txq_q) {
3809			if (tx->tcb_setup_cbfn)
3810				(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3820		tx->txf_vlan_id = 0;
3822		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3824		tx_mod->rid_mask |= (1 << tx->rid);
3826		return tx;
3829		bna_tx_free(tx);
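bna_tx_create() copies the bnad-supplied callbacks out of tx_cbfn (lines 3734-3739), so FSM code later needs only the tx object to call back into the upper layer. A reduced sketch of that callback-table registration, with two hooks instead of five and all names invented:

#include <stdio.h>

struct tx;

/* Consumer-supplied hooks, mirroring tcb_setup/tx_stall/... above. */
struct tx_event_cbfn {
	void (*tx_stall_cbfn)(void *owner, struct tx *tx);
	void (*tx_resume_cbfn)(void *owner, struct tx *tx);
};

struct tx {
	void *owner;
	void (*tx_stall_cbfn)(void *owner, struct tx *tx);
	void (*tx_resume_cbfn)(void *owner, struct tx *tx);
};

static void tx_create(struct tx *tx, void *owner,
		      const struct tx_event_cbfn *cb)
{
	/* copy the hooks so the FSM never dereferences the cb table */
	tx->owner = owner;
	tx->tx_stall_cbfn = cb->tx_stall_cbfn;
	tx->tx_resume_cbfn = cb->tx_resume_cbfn;
}

static void my_stall(void *owner, struct tx *tx)
{
	(void)owner; (void)tx;
	puts("stall datapath");
}

static void my_resume(void *owner, struct tx *tx)
{
	(void)owner; (void)tx;
	puts("resume datapath");
}

int main(void)
{
	static const struct tx_event_cbfn cb = { my_stall, my_resume };
	struct tx tx;

	tx_create(&tx, NULL, &cb);
	tx.tx_stall_cbfn(tx.owner, &tx);	/* as in bna_tx_sm_started() */
	tx.tx_resume_cbfn(tx.owner, &tx);	/* as in started_entry() */
	return 0;
}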
3834	bna_tx_destroy(struct bna_tx *tx)
3839		list_for_each(qe, &tx->txq_q) {
3841			if (tx->tcb_destroy_cbfn)
3842				(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3845		tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3846		bna_tx_free(tx);

3850	bna_tx_enable(struct bna_tx *tx)
3852		if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3855		tx->flags |= BNA_TX_F_ENABLED;
3857		if (tx->flags & BNA_TX_F_ENET_STARTED)
3858			bfa_fsm_send_event(tx, TX_E_START);

3862	bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3866			(*cbfn)(tx->bna->bnad, tx);
3870		tx->stop_cbfn = cbfn;
3871		tx->stop_cbarg = tx->bna->bnad;
3873		tx->flags &= ~BNA_TX_F_ENABLED;
3875		bfa_fsm_send_event(tx, TX_E_STOP);

3879	bna_tx_cleanup_complete(struct bna_tx *tx)
3881		bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);

3885	bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)

In bna_tx_mod_init():
3911		tx_mod->tx = (struct bna_tx *)
3922			tx_mod->tx[i].rid = i;
3923			bfa_q_qe_init(&tx_mod->tx[i].qe);
3924			list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
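bna_tx_mod_init() carves the tx array out of preallocated memory, assigns each element its rid (line 3922), and threads them all onto tx_free_q; paired with the sorted re-insertion in bna_tx_free() above, this yields a fixed-size pool that always hands out the lowest free rid. A minimal pool of the same shape (singly linked free list for brevity):

#include <stdio.h>

#define POOL_SIZE 4

struct obj {
	int rid;
	struct obj *next;
};

static struct obj pool[POOL_SIZE];
static struct obj *free_q;

/* Thread every preallocated element onto the free queue, rid = index. */
static void pool_init(void)
{
	int i;

	for (i = POOL_SIZE - 1; i >= 0; i--) {
		pool[i].rid = i;
		pool[i].next = free_q;
		free_q = &pool[i];
	}
}

static struct obj *pool_get(void)
{
	struct obj *o = free_q;

	if (o)
		free_q = o->next;
	return o;	/* NULL when exhausted, like bna_tx_get() */
}

int main(void)
{
	struct obj *o;

	pool_init();
	while ((o = pool_get()) != NULL)
		printf("got rid %d\n", o->rid);
	puts("pool exhausted");
	return 0;
}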
In bna_tx_mod_start():
3955		struct bna_tx *tx;
3963		tx = (struct bna_tx *)qe;
3964		if (tx->type == type)
3965			bna_tx_start(tx);

In bna_tx_mod_stop():
3972		struct bna_tx *tx;
3983		tx = (struct bna_tx *)qe;
3984		if (tx->type == type) {
3986			bna_tx_stop(tx);

In bna_tx_mod_fail():
3996		struct bna_tx *tx;
4003		tx = (struct bna_tx *)qe;
4004		bna_tx_fail(tx);

4009	bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
4014		list_for_each(qe, &tx->txq_q) {