Lines matching refs:port_priv (cross-reference hits in drivers/infiniband/core/mad.c). Each entry gives the source line number, the matching line, and the enclosing function; "local" and "argument" flag the lines where port_priv is declared.
75 struct ib_mad_port_private *port_priv,
204 struct ib_mad_port_private *port_priv; in ib_register_mad_agent() local
318 port_priv = ib_get_mad_port(device, port_num); in ib_register_mad_agent()
319 if (!port_priv) { in ib_register_mad_agent()
327 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
350 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
357 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
371 spin_lock_irqsave(&port_priv->reg_lock, flags); in ib_register_mad_agent()
381 class = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
395 vendor = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
416 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list); in ib_register_mad_agent()
417 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in ib_register_mad_agent()
422 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in ib_register_mad_agent()
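
The ib_register_mad_agent() hits above (source lines 371-422) center on port_priv->reg_lock, which serializes updates to the per-version class/vendor method tables and to the port-wide agent list. A minimal sketch of that locking pattern, assuming hypothetical simplified types (the real struct ib_mad_port_private and struct ib_mad_agent_private live in drivers/infiniband/core/mad_priv.h):

#include <linux/list.h>
#include <linux/spinlock.h>

struct port_sketch {
	spinlock_t reg_lock;		/* stands in for port_priv->reg_lock */
	struct list_head agent_list;	/* stands in for port_priv->agent_list */
};

struct agent_sketch {
	struct list_head agent_list;	/* link into port_sketch.agent_list */
};

static void register_agent_sketch(struct port_sketch *port,
				  struct agent_sketch *agent)
{
	unsigned long flags;

	/* irqsave form, matching the usage shown at lines 371/417 */
	spin_lock_irqsave(&port->reg_lock, flags);
	list_add_tail(&agent->agent_list, &port->agent_list);
	spin_unlock_irqrestore(&port->reg_lock, flags);
}

The unregister_mad_agent() hits below are the counterpart: the same lock is taken to unlink the agent (lines 572-575), and port_priv->wq is flushed (line 577) so no queued work still references it.
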
489 struct ib_mad_port_private *port_priv; in ib_register_mad_snoop() local
505 port_priv = ib_get_mad_port(device, port_num); in ib_register_mad_snoop()
506 if (!port_priv) { in ib_register_mad_snoop()
518 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
523 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_snoop()
528 &port_priv->qp_info[qpn], in ib_register_mad_snoop()
559 struct ib_mad_port_private *port_priv; in unregister_mad_agent() local
569 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
572 spin_lock_irqsave(&port_priv->reg_lock, flags); in unregister_mad_agent()
575 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in unregister_mad_agent()
577 flush_workqueue(port_priv->wq); in unregister_mad_agent()
726 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) in port_mad_size() argument
728 return rdma_max_mad_size(port_priv->device, port_priv->port_num); in port_mad_size()
750 struct ib_mad_port_private *port_priv; in handle_outgoing_dr_smp() local
756 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
759 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
760 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
870 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, in handle_outgoing_dr_smp()
872 if (port_priv) { in handle_outgoing_dr_smp()
874 recv_mad_agent = find_mad_agent(port_priv, in handle_outgoing_dr_smp()
877 if (!port_priv || !recv_mad_agent) { in handle_outgoing_dr_smp()
906 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
1449 struct ib_mad_port_private *port_priv; in add_nonoui_reg_req() local
1454 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1455 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; in add_nonoui_reg_req()
1510 struct ib_mad_port_private *port_priv; in add_oui_reg_req() local
1520 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1521 vendor_table = &port_priv->version[ in add_oui_reg_req()
1610 struct ib_mad_port_private *port_priv; in remove_mad_reg_req() local
1626 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1628 class = port_priv->version[ in remove_mad_reg_req()
1646 port_priv->version[ in remove_mad_reg_req()
1659 vendor = port_priv->version[ in remove_mad_reg_req()
1691 port_priv->version[ in remove_mad_reg_req()
1706 find_mad_agent(struct ib_mad_port_private *port_priv, in find_mad_agent() argument
1712 spin_lock_irqsave(&port_priv->reg_lock, flags); in find_mad_agent()
1722 list_for_each_entry(entry, &port_priv->agent_list, agent_list) { in find_mad_agent()
1743 class = port_priv->version[ in find_mad_agent()
1756 vendor = port_priv->version[ in find_mad_agent()
1781 dev_notice(&port_priv->device->dev, in find_mad_agent()
1783 &mad_agent->agent, port_priv->port_num); in find_mad_agent()
1788 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in find_mad_agent()
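
find_mad_agent() (source lines 1706-1788) is the read side of the same reg_lock: for responses it walks port_priv->agent_list looking for the agent that issued the request (line 1722; the real code compares the high bits of the MAD transaction ID), and for other MADs it dispatches through the version[]/class/vendor tables (lines 1743 and 1756). A sketch of the response-path walk only, with hypothetical simplified types:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct reg_port_sketch {
	spinlock_t reg_lock;
	struct list_head agent_list;
};

struct reg_agent_sketch {
	u32 hi_tid;			/* hypothetical field; stands in for the TID match */
	struct list_head agent_list;
};

static struct reg_agent_sketch *
find_agent_sketch(struct reg_port_sketch *port, u32 hi_tid)
{
	struct reg_agent_sketch *entry, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port->reg_lock, flags);
	list_for_each_entry(entry, &port->agent_list, agent_list) {
		if (entry->hi_tid == hi_tid) {
			found = entry;
			break;
		}
	}
	spin_unlock_irqrestore(&port->reg_lock, flags);
	return found;
}
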
2018 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, in handle_ib_smi() argument
2029 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
2031 port_priv->device->phys_port_cnt) == in handle_ib_smi()
2041 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
2045 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) in handle_ib_smi()
2047 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_ib_smi()
2056 port_priv->device, in handle_ib_smi()
2103 handle_opa_smi(struct ib_mad_port_private *port_priv, in handle_opa_smi() argument
2114 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
2116 port_priv->device->phys_port_cnt) == in handle_opa_smi()
2126 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
2130 if (opa_smi_check_local_smp(smp, port_priv->device) == in handle_opa_smi()
2134 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_opa_smi()
2144 port_priv->device, in handle_opa_smi()
2157 handle_smi(struct ib_mad_port_private *port_priv, in handle_smi() argument
2169 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2172 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
2175 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, in ib_mad_recv_done_handler() argument
2193 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done_handler()
2194 qp_info->port_priv->port_num); in ib_mad_recv_done_handler()
2199 ib_dma_unmap_single(port_priv->device, in ib_mad_recv_done_handler()
2229 dev_err(&port_priv->device->dev, in ib_mad_recv_done_handler()
2234 if (rdma_cap_ib_switch(port_priv->device)) in ib_mad_recv_done_handler()
2237 port_num = port_priv->port_num; in ib_mad_recv_done_handler()
2241 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done_handler()
2248 if (port_priv->device->process_mad) { in ib_mad_recv_done_handler()
2249 ret = port_priv->device->process_mad(port_priv->device, 0, in ib_mad_recv_done_handler()
2250 port_priv->port_num, in ib_mad_recv_done_handler()
2266 port_priv->device, in ib_mad_recv_done_handler()
2275 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); in ib_mad_recv_done_handler()
2286 port_priv->device, port_num, in ib_mad_recv_done_handler()
2317 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2352 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
2416 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, in ib_mad_send_done_handler() argument
2468 dev_err(&port_priv->device->dev, in ib_mad_send_done_handler()
2493 static void mad_error_handler(struct ib_mad_port_private *port_priv, in mad_error_handler() argument
2526 ib_mad_send_done_handler(port_priv, wc); in mad_error_handler()
2528 ib_mad_send_done_handler(port_priv, wc); in mad_error_handler()
2541 dev_err(&port_priv->device->dev, in mad_error_handler()
2547 ib_mad_send_done_handler(port_priv, wc); in mad_error_handler()
2556 struct ib_mad_port_private *port_priv; in ib_mad_completion_handler() local
2559 port_priv = container_of(work, struct ib_mad_port_private, work); in ib_mad_completion_handler()
2560 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_completion_handler()
2562 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { in ib_mad_completion_handler()
2566 ib_mad_send_done_handler(port_priv, &wc); in ib_mad_completion_handler()
2569 ib_mad_recv_done_handler(port_priv, &wc); in ib_mad_completion_handler()
2576 mad_error_handler(port_priv, &wc); in ib_mad_completion_handler()
2693 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2694 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2832 port_priv->wq, in timeout_sends()
2860 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_thread_completion_handler() local
2864 if (!list_empty(&port_priv->port_list)) in ib_mad_thread_completion_handler()
2865 queue_work(port_priv->wq, &port_priv->work); in ib_mad_thread_completion_handler()
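
ib_mad_thread_completion_handler() (source lines 2860-2865) is the interrupt-context CQ callback: it recovers port_priv from cq->cq_context and, if the port is still linked into the global ib_mad_port_list (line 2864), queues port_priv->work. The heavy lifting happens in ib_mad_completion_handler() (lines 2559-2576), which re-arms the CQ and drains it, dispatching each work completion to the send, receive, or error path. A compressed sketch of that split, using verbs calls I believe match this kernel vintage (ib_req_notify_cq/ib_poll_cq) and comment stubs for the dispatch:

#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct cq_port_sketch {
	struct ib_cq *cq;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void completion_work_sketch(struct work_struct *work)
{
	struct cq_port_sketch *port =
		container_of(work, struct cq_port_sketch, work);
	struct ib_wc wc;

	/* Re-arm before polling, as at lines 2560/2562, so a completion
	 * that arrives mid-drain still produces a fresh callback. */
	ib_req_notify_cq(port->cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(port->cq, 1, &wc) == 1) {
		/* the real loop branches to ib_mad_send_done_handler(),
		 * ib_mad_recv_done_handler(), or mad_error_handler() */
	}
}

/* Interrupt context: defer everything to the workqueue. */
static void cq_callback_sketch(struct ib_cq *cq, void *cq_context)
{
	struct cq_port_sketch *port = cq_context;

	queue_work(port->wq, &port->work);
}
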
2883 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2896 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2899 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2906 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2910 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2930 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2935 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2969 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
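
ib_mad_post_receive_mads() (source lines 2883-2935) shows the receive-buffer DMA discipline around qp_info->port_priv->device: map with ib_dma_map_single(), verify the mapping with ib_dma_mapping_error() before posting, and unmap on the failure path; cleanup_recv_queue() (line 2969) unmaps whatever is still outstanding at teardown. A sketch of that map/check/unmap sequence, with a hypothetical post_recv_sketch() standing in for the real ib_post_recv() call:

#include <rdma/ib_verbs.h>

static int post_recv_sketch(u64 dma_addr) { return 0; }	/* placeholder */

static int map_and_post_sketch(struct ib_device *dev, void *buf, size_t len)
{
	u64 dma_addr;
	int ret;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, dma_addr)))
		return -ENOMEM;

	ret = post_recv_sketch(dma_addr);
	if (ret)
		/* mirror lines 2930-2935: undo the mapping on failure */
		ib_dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
	return ret;
}
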
2982 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) in ib_mad_port_start() argument
2991 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2996 ret = ib_find_pkey(port_priv->device, port_priv->port_num, in ib_mad_port_start()
3002 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
3016 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3025 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3035 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3042 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_port_start()
3044 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3051 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
3054 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
3056 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3071 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
3085 static void init_mad_qp(struct ib_mad_port_private *port_priv, in init_mad_qp() argument
3088 qp_info->port_priv = port_priv; in init_mad_qp()
3105 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
3106 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
3113 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
3116 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
3118 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
3150 struct ib_mad_port_private *port_priv; in ib_mad_port_open() local
3164 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); in ib_mad_port_open()
3165 if (!port_priv) { in ib_mad_port_open()
3170 port_priv->device = device; in ib_mad_port_open()
3171 port_priv->port_num = port_num; in ib_mad_port_open()
3172 spin_lock_init(&port_priv->reg_lock); in ib_mad_port_open()
3173 INIT_LIST_HEAD(&port_priv->agent_list); in ib_mad_port_open()
3174 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
3175 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
3183 port_priv->cq = ib_create_cq(port_priv->device, in ib_mad_port_open()
3185 NULL, port_priv, &cq_attr); in ib_mad_port_open()
3186 if (IS_ERR(port_priv->cq)) { in ib_mad_port_open()
3188 ret = PTR_ERR(port_priv->cq); in ib_mad_port_open()
3192 port_priv->pd = ib_alloc_pd(device); in ib_mad_port_open()
3193 if (IS_ERR(port_priv->pd)) { in ib_mad_port_open()
3195 ret = PTR_ERR(port_priv->pd); in ib_mad_port_open()
3200 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
3204 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3209 port_priv->wq = create_singlethread_workqueue(name); in ib_mad_port_open()
3210 if (!port_priv->wq) { in ib_mad_port_open()
3214 INIT_WORK(&port_priv->work, ib_mad_completion_handler); in ib_mad_port_open()
3217 list_add_tail(&port_priv->port_list, &ib_mad_port_list); in ib_mad_port_open()
3220 ret = ib_mad_port_start(port_priv); in ib_mad_port_open()
3230 list_del_init(&port_priv->port_list); in ib_mad_port_open()
3233 destroy_workqueue(port_priv->wq); in ib_mad_port_open()
3235 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3237 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3239 ib_dealloc_pd(port_priv->pd); in ib_mad_port_open()
3241 ib_destroy_cq(port_priv->cq); in ib_mad_port_open()
3242 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3243 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3245 kfree(port_priv); in ib_mad_port_open()
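
ib_mad_port_open() (source lines 3150-3245) builds the port state in a fixed order: allocate port_priv, create the CQ, allocate the PD, create the SMI and GSI QPs, create the workqueue, link the port into the global list, then start it. The error labels at lines 3230-3245 unwind in exactly the reverse order, and ib_mad_port_close() (below, lines 3267-3281) repeats the same teardown on the normal path. A self-contained sketch of that goto-unwind idiom, with kzalloc placeholders rather than the real verbs calls:

#include <linux/slab.h>

struct open_sketch {
	void *cq;	/* stands in for port_priv->cq */
	void *pd;	/* stands in for port_priv->pd */
	void *qp;	/* stands in for the MAD QPs and workqueue */
};

static int port_open_sketch(struct open_sketch *s)
{
	s->cq = kzalloc(16, GFP_KERNEL);
	if (!s->cq)
		return -ENOMEM;

	s->pd = kzalloc(16, GFP_KERNEL);
	if (!s->pd)
		goto err_free_cq;

	s->qp = kzalloc(16, GFP_KERNEL);
	if (!s->qp)
		goto err_free_pd;

	return 0;

	/* unwind strictly in reverse creation order, as at lines 3230-3245 */
err_free_pd:
	kfree(s->pd);
err_free_cq:
	kfree(s->cq);
	return -ENOMEM;
}
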
3257 struct ib_mad_port_private *port_priv; in ib_mad_port_close() local
3261 port_priv = __ib_get_mad_port(device, port_num); in ib_mad_port_close()
3262 if (port_priv == NULL) { in ib_mad_port_close()
3267 list_del_init(&port_priv->port_list); in ib_mad_port_close()
3270 destroy_workqueue(port_priv->wq); in ib_mad_port_close()
3271 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3272 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3273 ib_dealloc_pd(port_priv->pd); in ib_mad_port_close()
3274 ib_destroy_cq(port_priv->cq); in ib_mad_port_close()
3275 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3276 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
3279 kfree(port_priv); in ib_mad_port_close()