Lines matching refs: pr — cross-reference of struct ehea_port_res *pr in the Linux ehea (IBM eHEA) Ethernet driver. Each hit shows the source line number, the matching line, and the enclosing function; "local"/"argument" marks how pr is bound at that site.

204 struct ehea_port_res *pr = &port->port_res[l]; in ehea_update_firmware_handles() local
207 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles()
209 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
211 arr[i++].fwh = pr->recv_cq->fw_handle; in ehea_update_firmware_handles()
213 arr[i++].fwh = pr->eq->fw_handle; in ehea_update_firmware_handles()
215 arr[i++].fwh = pr->send_mr.handle; in ehea_update_firmware_handles()
217 arr[i++].fwh = pr->recv_mr.handle; in ehea_update_firmware_handles()
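
A note on the cluster above: ehea_update_firmware_handles() walks every port resource and flattens the firmware handles of its QP, send/recv CQs, EQ and send/recv memory regions into one flat array, so all handles can be tracked in a single place. Below is a minimal userspace sketch of that gathering pattern, assuming nothing beyond what the hits show; the types and names (fw_handle_t, port_res_sketch, collect_handles) are invented for illustration and are not the driver's own.

#include <stdio.h>

typedef unsigned long long fw_handle_t;

/* toy stand-in for struct ehea_port_res: six handles per resource */
struct port_res_sketch {
	fw_handle_t qp, send_cq, recv_cq, eq, send_mr, recv_mr;
};

static size_t collect_handles(const struct port_res_sketch *res, size_t nres,
			      fw_handle_t *arr)
{
	size_t i = 0;

	for (size_t l = 0; l < nres; l++) {
		arr[i++] = res[l].qp;
		arr[i++] = res[l].send_cq;
		arr[i++] = res[l].recv_cq;
		arr[i++] = res[l].eq;
		arr[i++] = res[l].send_mr;
		arr[i++] = res[l].recv_mr;
	}
	return i;	/* number of handles written */
}

int main(void)
{
	struct port_res_sketch res[2] = {
		{ 1, 2, 3, 4, 5, 6 }, { 7, 8, 9, 10, 11, 12 },
	};
	fw_handle_t arr[12];
	size_t n = collect_handles(res, 2, arr);

	printf("collected %zu handles, first %llu, last %llu\n",
	       n, arr[0], arr[n - 1]);
	return 0;
}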
394 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) in ehea_refill_rq1() argument
396 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_refill_rq1()
397 struct net_device *dev = pr->port->netdev; in ehea_refill_rq1()
398 int max_index_mask = pr->rq1_skba.len - 1; in ehea_refill_rq1()
399 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; in ehea_refill_rq1()
403 pr->rq1_skba.os_skbs = 0; in ehea_refill_rq1()
407 pr->rq1_skba.index = index; in ehea_refill_rq1()
408 pr->rq1_skba.os_skbs = fill_wqes; in ehea_refill_rq1()
417 pr->rq1_skba.os_skbs = fill_wqes - i; in ehea_refill_rq1()
430 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1()
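
The ehea_refill_rq1() hits above show the receive-ring bookkeeping in miniature: rq1_skba.len is a power of two, so the index wraps with the bitwise mask len - 1, and os_skbs remembers slots that could not be filled when skb allocation failed, to be retried on the next refill. A compilable sketch of the same bookkeeping, with invented names (ring_sketch, os_slots) and malloc standing in for netdev_alloc_skb:

#include <stdio.h>
#include <stdlib.h>

struct ring_sketch {
	void **arr;	/* slot storage; length is a power of two */
	int len;
	int index;	/* next slot to fill */
	int os_slots;	/* slots still owed after a failed refill */
};

static void ring_refill(struct ring_sketch *r, int nr)
{
	int fill = r->os_slots + nr;	/* include the backlog */
	int mask = r->len - 1;		/* power-of-two wrap */

	r->os_slots = 0;
	for (int i = 0; i < fill; i++) {
		if (!r->arr[r->index]) {
			void *buf = malloc(64);	/* stand-in for an skb */
			if (!buf) {
				r->os_slots = fill - i;	/* retry later */
				break;
			}
			r->arr[r->index] = buf;
		}
		r->index = (r->index + 1) & mask;
	}
}

int main(void)
{
	void *slots[8] = { 0 };
	struct ring_sketch r = { slots, 8, 0, 0 };

	ring_refill(&r, 5);
	printf("index now %d, owed %d\n", r.index, r.os_slots);
	return 0;
}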
433 static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) in ehea_init_fill_rq1() argument
435 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_init_fill_rq1()
436 struct net_device *dev = pr->port->netdev; in ehea_init_fill_rq1()
439 if (nr_rq1a > pr->rq1_skba.len) { in ehea_init_fill_rq1()
450 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1()
453 static int ehea_refill_rq_def(struct ehea_port_res *pr, in ehea_refill_rq_def() argument
457 struct net_device *dev = pr->port->netdev; in ehea_refill_rq_def()
458 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def()
483 netdev_info(pr->port->netdev, in ehea_refill_rq_def()
503 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; in ehea_refill_rq_def()
520 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def()
522 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def()
528 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq2() argument
530 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, in ehea_refill_rq2()
536 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) in ehea_refill_rq3() argument
538 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, in ehea_refill_rq3()
556 struct ehea_port_res *pr) in ehea_fill_skb() argument
571 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); in ehea_fill_skb()
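
The skb_record_rx_queue() hit above recovers the queue number with nothing but pointer arithmetic: pr points into the port's port_res[] array, so pr - &pr->port->port_res[0] is its element index. A standalone illustration of that C rule:

#include <stdio.h>

struct res { int dummy; };

int main(void)
{
	struct res port_res[4];
	struct res *pr = &port_res[2];

	/* difference of pointers into the same array is the element
	 * index (ptrdiff_t), printed with %td */
	printf("queue index = %td\n", pr - &port_res[0]);	/* 2 */
	return 0;
}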
628 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, in ehea_treat_poll_error() argument
635 pr->p_stats.err_tcp_cksum++; in ehea_treat_poll_error()
637 pr->p_stats.err_ip_cksum++; in ehea_treat_poll_error()
639 pr->p_stats.err_frame_crc++; in ehea_treat_poll_error()
643 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); in ehea_treat_poll_error()
647 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); in ehea_treat_poll_error()
652 if (netif_msg_rx_err(pr->port)) { in ehea_treat_poll_error()
654 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error()
657 ehea_schedule_port_reset(pr->port); in ehea_treat_poll_error()
665 struct ehea_port_res *pr, in ehea_proc_rwqes() argument
668 struct ehea_port *port = pr->port; in ehea_proc_rwqes()
669 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes()
672 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_proc_rwqes()
673 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; in ehea_proc_rwqes()
674 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; in ehea_proc_rwqes()
675 int skb_arr_rq1_len = pr->rq1_skba.len; in ehea_proc_rwqes()
676 int skb_arr_rq2_len = pr->rq2_skba.len; in ehea_proc_rwqes()
677 int skb_arr_rq3_len = pr->rq3_skba.len; in ehea_proc_rwqes()
712 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
722 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
733 ehea_fill_skb(dev, skb, cqe, pr); in ehea_proc_rwqes()
743 napi_gro_receive(&pr->napi, skb); in ehea_proc_rwqes()
745 pr->p_stats.poll_receive_errors++; in ehea_proc_rwqes()
746 port_reset = ehea_treat_poll_error(pr, rq, cqe, in ehea_proc_rwqes()
755 pr->rx_packets += processed; in ehea_proc_rwqes()
756 pr->rx_bytes += processed_bytes; in ehea_proc_rwqes()
758 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); in ehea_proc_rwqes()
759 ehea_refill_rq2(pr, processed_rq2); in ehea_proc_rwqes()
760 ehea_refill_rq3(pr, processed_rq3); in ehea_proc_rwqes()
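
ehea_proc_rwqes() above dispatches each receive completion to the skb array of whichever of the three receive queues (rq1-rq3, apparently split by frame size) it arrived on, and refills all three once per poll using the counts it accumulated, rather than refilling per packet. A skeleton of that dispatch-then-batched-refill shape; all names are invented:

#include <stdio.h>

struct rx_done { int rq1, rq2, rq3; };

static void proc_rwqes_sketch(const int *rq_of_cqe, int n, struct rx_done *d)
{
	for (int i = 0; i < n; i++) {
		switch (rq_of_cqe[i]) {
		case 1: d->rq1++; break;
		case 2: d->rq2++; break;
		default: d->rq3++; break;
		}
	}
	/* one batched refill per poll, mirroring the tail of the real loop:
	 * refill_rq1(d->rq1); refill_rq2(d->rq2); refill_rq3(d->rq3); */
}

int main(void)
{
	int cqes[] = { 1, 3, 2, 1, 1 };
	struct rx_done d = { 0, 0, 0 };

	proc_rwqes_sketch(cqes, 5, &d);
	printf("rq1=%d rq2=%d rq3=%d\n", d.rq1, d.rq2, d.rq3);
	return 0;
}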
772 struct ehea_port_res *pr = &port->port_res[i]; in reset_sq_restart_flag() local
773 pr->sq_restart_flag = 0; in reset_sq_restart_flag()
785 struct ehea_port_res *pr = &port->port_res[i]; in check_sqs() local
788 swqe = ehea_get_swqe(pr->qp, &swqe_index); in check_sqs()
790 atomic_dec(&pr->swqe_avail); in check_sqs()
798 ehea_post_swqe(pr->qp, swqe); in check_sqs()
801 pr->sq_restart_flag == 0, in check_sqs()
806 ehea_schedule_port_reset(pr->port); in check_sqs()
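
reset_sq_restart_flag() and check_sqs() above form a liveness handshake: check_sqs() posts a marker send WQE and waits, with a timeout, for the completion path to flip sq_restart_flag; if the flag never changes, the hardware and software queues are out of sync and the port is scheduled for reset. A hedged userspace model of that shape, using a C11 atomic and a bounded poll in place of the kernel's wait queue and jiffies timeout:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sq_restart_flag;	/* flipped by the completion path */

static bool wait_for_flag(int max_polls)
{
	for (int i = 0; i < max_polls; i++)	/* bounded wait */
		if (atomic_load(&sq_restart_flag))
			return true;
	return false;	/* timed out: caller schedules a port reset */
}

int main(void)
{
	atomic_store(&sq_restart_flag, 1);	/* pretend the CQE arrived */
	printf("sync %s\n", wait_for_flag(1000) ? "ok" : "lost");
	return 0;
}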
813 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) in ehea_proc_cqes() argument
816 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes()
822 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes()
823 pr - &pr->port->port_res[0]); in ehea_proc_cqes()
833 pr->sq_restart_flag = 1; in ehea_proc_cqes()
842 if (netif_msg_tx_err(pr->port)) in ehea_proc_cqes()
847 ehea_schedule_port_reset(pr->port); in ehea_proc_cqes()
852 if (netif_msg_tx_done(pr->port)) in ehea_proc_cqes()
859 skb = pr->sq_skba.arr[index]; in ehea_proc_cqes()
861 pr->sq_skba.arr[index] = NULL; in ehea_proc_cqes()
871 atomic_add(swqe_av, &pr->swqe_avail); in ehea_proc_cqes()
874 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { in ehea_proc_cqes()
877 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) in ehea_proc_cqes()
882 wake_up(&pr->port->swqe_avail_wq); in ehea_proc_cqes()
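
ehea_proc_cqes() above returns all transmit credits for a batch in a single atomic_add and only wakes a stopped queue once swqe_avail has climbed back past swqe_refill_th, which keeps the queue from thrashing between stopped and woken. A C11-atomics sketch of that credit-return rule; names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int swqe_avail;

/* returns true when a stopped queue should be woken */
static bool reap_completions(int completed, int refill_th, bool stopped)
{
	/* one atomic add for the whole batch; new value = old + batch */
	int avail = atomic_fetch_add(&swqe_avail, completed) + completed;

	return stopped && avail >= refill_th;
}

int main(void)
{
	atomic_store(&swqe_avail, 0);
	printf("wake=%d\n", reap_completions(16, 12, true));	/* wake=1 */
	return 0;
}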
891 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, in ehea_poll() local
893 struct net_device *dev = pr->port->netdev; in ehea_poll()
899 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
900 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
904 ehea_reset_cq_ep(pr->recv_cq); in ehea_poll()
905 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
906 ehea_reset_cq_n1(pr->recv_cq); in ehea_poll()
907 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
909 cqe = ehea_poll_rq1(pr->qp, &wqe_index); in ehea_poll()
910 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
918 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); in ehea_poll()
919 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
938 struct ehea_port_res *pr = param; in ehea_recv_irq_handler() local
940 napi_schedule(&pr->napi); in ehea_recv_irq_handler()
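
ehea_poll() and ehea_recv_irq_handler() above are the usual interrupt-mitigation pair: the IRQ handler only schedules the poll routine, and the poll routine, once it finishes under budget, re-enables completion events (the ehea_reset_cq_* calls) and then looks at the queues one more time, so work that raced in during re-arm is handled immediately instead of waiting for the next interrupt. A control-flow sketch of that re-check, deliberately not using the kernel NAPI API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;	/* stands in for the hardware queues */

static int process(int quota)
{
	(void)quota;
	return atomic_exchange(&pending, 0);	/* consume pending work */
}

static int poll_once(int budget)
{
	int done = process(budget);

	if (done < budget) {
		/* driver re-enables interrupts here ... */
		/* ... then one more look closes the re-arm race */
		done += process(budget - done);
	}
	return done;
}

int main(void)
{
	atomic_store(&pending, 1);
	printf("handled %d\n", poll_once(64));
	return 0;
}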
1275 static int ehea_fill_port_res(struct ehea_port_res *pr) in ehea_fill_port_res() argument
1278 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; in ehea_fill_port_res()
1280 ehea_init_fill_rq1(pr, pr->rq1_skba.len); in ehea_fill_port_res()
1282 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); in ehea_fill_port_res()
1284 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); in ehea_fill_port_res()
1292 struct ehea_port_res *pr; in ehea_reg_interrupts() local
1314 pr = &port->port_res[i]; in ehea_reg_interrupts()
1315 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, in ehea_reg_interrupts()
1317 ret = ibmebus_request_irq(pr->eq->attr.ist1, in ehea_reg_interrupts()
1319 0, pr->int_send_name, pr); in ehea_reg_interrupts()
1322 i, pr->eq->attr.ist1); in ehea_reg_interrupts()
1327 pr->eq->attr.ist1, i); in ehea_reg_interrupts()
1350 struct ehea_port_res *pr; in ehea_free_interrupts() local
1356 pr = &port->port_res[i]; in ehea_free_interrupts()
1357 ibmebus_free_irq(pr->eq->attr.ist1, pr); in ehea_free_interrupts()
1360 i, pr->eq->attr.ist1); in ehea_free_interrupts()
1418 static int ehea_gen_smrs(struct ehea_port_res *pr) in ehea_gen_smrs() argument
1421 struct ehea_adapter *adapter = pr->port->adapter; in ehea_gen_smrs()
1423 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1427 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
1434 ehea_rem_mr(&pr->send_mr); in ehea_gen_smrs()
1440 static int ehea_rem_smrs(struct ehea_port_res *pr) in ehea_rem_smrs() argument
1442 if ((ehea_rem_mr(&pr->send_mr)) || in ehea_rem_smrs()
1443 (ehea_rem_mr(&pr->recv_mr))) in ehea_rem_smrs()
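
ehea_gen_smrs() above is the kernel's standard partial-failure unwind: if registering the second (receive) memory region fails, the first (send) one is removed via a goto label before the error is returned; ehea_rem_smrs() then tears both down on the normal path. Minimal sketch, with malloc standing in for registration:

#include <stdio.h>
#include <stdlib.h>

struct mr_sketch { void *mem; };

static int gen_one(struct mr_sketch *mr)
{
	mr->mem = malloc(32);
	return mr->mem ? 0 : -1;
}

static int gen_smrs_sketch(struct mr_sketch *send_mr,
			   struct mr_sketch *recv_mr)
{
	if (gen_one(send_mr))
		return -1;
	if (gen_one(recv_mr))
		goto out_free;	/* second failed: unwind the first */
	return 0;

out_free:
	free(send_mr->mem);
	return -1;
}

int main(void)
{
	struct mr_sketch s, r;
	int ret = gen_smrs_sketch(&s, &r);

	printf("%s\n", ret ? "failed" : "registered");
	if (!ret) {
		free(s.mem);
		free(r.mem);
	}
	return ret;
}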
1464 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, in ehea_init_port_res() argument
1473 tx_bytes = pr->tx_bytes; in ehea_init_port_res()
1474 tx_packets = pr->tx_packets; in ehea_init_port_res()
1475 rx_bytes = pr->rx_bytes; in ehea_init_port_res()
1476 rx_packets = pr->rx_packets; in ehea_init_port_res()
1478 memset(pr, 0, sizeof(struct ehea_port_res)); in ehea_init_port_res()
1480 pr->tx_bytes = tx_bytes; in ehea_init_port_res()
1481 pr->tx_packets = tx_packets; in ehea_init_port_res()
1482 pr->rx_bytes = rx_bytes; in ehea_init_port_res()
1483 pr->rx_packets = rx_packets; in ehea_init_port_res()
1485 pr->port = port; in ehea_init_port_res()
1487 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); in ehea_init_port_res()
1488 if (!pr->eq) { in ehea_init_port_res()
1493 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, in ehea_init_port_res()
1494 pr->eq->fw_handle, in ehea_init_port_res()
1496 if (!pr->recv_cq) { in ehea_init_port_res()
1501 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
1502 pr->eq->fw_handle, in ehea_init_port_res()
1504 if (!pr->send_cq) { in ehea_init_port_res()
1511 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1512 pr->recv_cq->attr.act_nr_of_cqes); in ehea_init_port_res()
1536 init_attr->send_cq_handle = pr->send_cq->fw_handle; in ehea_init_port_res()
1537 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; in ehea_init_port_res()
1540 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); in ehea_init_port_res()
1541 if (!pr->qp) { in ehea_init_port_res()
1555 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; in ehea_init_port_res()
1557 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); in ehea_init_port_res()
1558 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); in ehea_init_port_res()
1559 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); in ehea_init_port_res()
1560 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); in ehea_init_port_res()
1564 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; in ehea_init_port_res()
1565 if (ehea_gen_smrs(pr) != 0) { in ehea_init_port_res()
1570 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); in ehea_init_port_res()
1574 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); in ehea_init_port_res()
1581 vfree(pr->sq_skba.arr); in ehea_init_port_res()
1582 vfree(pr->rq1_skba.arr); in ehea_init_port_res()
1583 vfree(pr->rq2_skba.arr); in ehea_init_port_res()
1584 vfree(pr->rq3_skba.arr); in ehea_init_port_res()
1585 ehea_destroy_qp(pr->qp); in ehea_init_port_res()
1586 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
1587 ehea_destroy_cq(pr->recv_cq); in ehea_init_port_res()
1588 ehea_destroy_eq(pr->eq); in ehea_init_port_res()
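
ehea_init_port_res() above builds its resources in a strict order (EQ, receive CQ, send CQ, QP, skb arrays, shared MRs) and the out_free tail visible in the hits destroys them in exact reverse order, each failure jumping to the label that unwinds only what already exists. A compressed sketch of that error ladder, with malloc/free standing in for the create/destroy firmware calls:

#include <stdlib.h>

struct pres { void *eq, *recv_cq, *send_cq, *qp; };

static int init_port_res_sketch(struct pres *pr)
{
	pr->eq = malloc(1);
	if (!pr->eq)
		goto out;
	pr->recv_cq = malloc(1);
	if (!pr->recv_cq)
		goto out_eq;
	pr->send_cq = malloc(1);
	if (!pr->send_cq)
		goto out_rcq;
	pr->qp = malloc(1);
	if (!pr->qp)
		goto out_scq;
	return 0;	/* success: caller owns everything */

out_scq:	/* labels unwind in reverse creation order */
	free(pr->send_cq);
out_rcq:
	free(pr->recv_cq);
out_eq:
	free(pr->eq);
out:
	return -1;
}

int main(void)
{
	struct pres pr;

	if (init_port_res_sketch(&pr))
		return 1;
	/* teardown mirrors ehea_clean_portres(): also reverse order */
	free(pr.qp); free(pr.send_cq); free(pr.recv_cq); free(pr.eq);
	return 0;
}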
1593 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) in ehea_clean_portres() argument
1597 if (pr->qp) in ehea_clean_portres()
1598 netif_napi_del(&pr->napi); in ehea_clean_portres()
1600 ret = ehea_destroy_qp(pr->qp); in ehea_clean_portres()
1603 ehea_destroy_cq(pr->send_cq); in ehea_clean_portres()
1604 ehea_destroy_cq(pr->recv_cq); in ehea_clean_portres()
1605 ehea_destroy_eq(pr->eq); in ehea_clean_portres()
1607 for (i = 0; i < pr->rq1_skba.len; i++) in ehea_clean_portres()
1608 if (pr->rq1_skba.arr[i]) in ehea_clean_portres()
1609 dev_kfree_skb(pr->rq1_skba.arr[i]); in ehea_clean_portres()
1611 for (i = 0; i < pr->rq2_skba.len; i++) in ehea_clean_portres()
1612 if (pr->rq2_skba.arr[i]) in ehea_clean_portres()
1613 dev_kfree_skb(pr->rq2_skba.arr[i]); in ehea_clean_portres()
1615 for (i = 0; i < pr->rq3_skba.len; i++) in ehea_clean_portres()
1616 if (pr->rq3_skba.arr[i]) in ehea_clean_portres()
1617 dev_kfree_skb(pr->rq3_skba.arr[i]); in ehea_clean_portres()
1619 for (i = 0; i < pr->sq_skba.len; i++) in ehea_clean_portres()
1620 if (pr->sq_skba.arr[i]) in ehea_clean_portres()
1621 dev_kfree_skb(pr->sq_skba.arr[i]); in ehea_clean_portres()
1623 vfree(pr->rq1_skba.arr); in ehea_clean_portres()
1624 vfree(pr->rq2_skba.arr); in ehea_clean_portres()
1625 vfree(pr->rq3_skba.arr); in ehea_clean_portres()
1626 vfree(pr->sq_skba.arr); in ehea_clean_portres()
1627 ret = ehea_rem_smrs(pr); in ehea_clean_portres()
2057 struct ehea_port_res *pr; in ehea_start_xmit() local
2060 pr = &port->port_res[skb_get_queue_mapping(skb)]; in ehea_start_xmit()
2063 swqe = ehea_get_swqe(pr->qp, &swqe_index); in ehea_start_xmit()
2065 atomic_dec(&pr->swqe_avail); in ehea_start_xmit()
2072 pr->tx_packets++; in ehea_start_xmit()
2073 pr->tx_bytes += skb->len; in ehea_start_xmit()
2077 u32 swqe_num = pr->swqe_id_counter; in ehea_start_xmit()
2081 if (pr->swqe_ll_count >= (sig_iv - 1)) { in ehea_start_xmit()
2085 pr->swqe_ll_count = 0; in ehea_start_xmit()
2087 pr->swqe_ll_count += 1; in ehea_start_xmit()
2091 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) in ehea_start_xmit()
2093 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); in ehea_start_xmit()
2094 pr->sq_skba.arr[pr->sq_skba.index] = skb; in ehea_start_xmit()
2096 pr->sq_skba.index++; in ehea_start_xmit()
2097 pr->sq_skba.index &= (pr->sq_skba.len - 1); in ehea_start_xmit()
2099 lkey = pr->send_mr.lkey; in ehea_start_xmit()
2103 pr->swqe_id_counter += 1; in ehea_start_xmit()
2106 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); in ehea_start_xmit()
2115 ehea_post_swqe(pr->qp, swqe); in ehea_start_xmit()
2117 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { in ehea_start_xmit()
2118 pr->p_stats.queue_stopped++; in ehea_start_xmit()
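
ehea_start_xmit() above carries the whole transmit-side bookkeeping: one credit is taken from swqe_avail per descriptor, the skb pointer is parked in the power-of-two sq_skba ring at the same index that is encoded into the WQE id (so the completion handler can find it), and a signalled completion is requested only every sig_iv descriptors to keep the send CQ quiet; queue_stopped is bumped when credits run out. A hedged sketch of those rules with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq_sketch {
	void **skb_ring;	/* length is a power of two */
	int ring_len;
	int index;		/* next free ring slot */
	int ll_count;		/* descriptors since last signalled one */
	atomic_int avail;	/* free descriptor credits */
};

/* returns true when this descriptor requests a signalled completion */
static bool xmit_sketch(struct txq_sketch *tq, void *skb, int sig_iv)
{
	bool signal = false;

	atomic_fetch_sub(&tq->avail, 1);	/* take one credit */

	if (tq->ll_count >= sig_iv - 1) {
		signal = true;		/* ask hardware for a completion */
		tq->ll_count = 0;
	} else {
		tq->ll_count++;
	}

	/* park the skb where the completion path will look for it */
	tq->skb_ring[tq->index] = skb;
	tq->index = (tq->index + 1) & (tq->ring_len - 1);

	/* post to hardware here; stop the queue when avail drops to 1 */
	return signal;
}

int main(void)
{
	void *ring[8] = { 0 };
	struct txq_sketch tq = { ring, 8, 0, 0, 16 };
	int dummy_skb;

	printf("signalled=%d avail=%d\n",
	       (int)xmit_sketch(&tq, &dummy_skb, 4),
	       atomic_load(&tq.avail));
	return 0;
}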
2528 struct ehea_port_res *pr = &port->port_res[i]; in ehea_flush_sq() local
2529 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; in ehea_flush_sq()
2533 atomic_read(&pr->swqe_avail) >= swqe_max, in ehea_flush_sq()
2562 struct ehea_port_res *pr = &port->port_res[i]; in ehea_stop_qps() local
2563 struct ehea_qp *qp = pr->qp; in ehea_stop_qps()
2598 dret = ehea_rem_smrs(pr); in ehea_stop_qps()
2612 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) in ehea_update_rqs() argument
2617 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; in ehea_update_rqs()
2618 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; in ehea_update_rqs()
2620 u32 lkey = pr->recv_mr.lkey; in ehea_update_rqs()
2664 struct ehea_port_res *pr = &port->port_res[i]; in ehea_restart_qps() local
2665 struct ehea_qp *qp = pr->qp; in ehea_restart_qps()
2667 ret = ehea_gen_smrs(pr); in ehea_restart_qps()
2673 ehea_update_rqs(qp, pr); in ehea_restart_qps()
2705 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); in ehea_restart_qps()
2706 ehea_refill_rq2(pr, 0); in ehea_restart_qps()
2707 ehea_refill_rq3(pr, 0); in ehea_restart_qps()