Lines Matching refs:srp_dev
225 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, in srp_alloc_iu()
227 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) in srp_alloc_iu()
248 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, in srp_free_iu()
269 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, in srp_init_qp()
297 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, in srp_new_cm_id()
315 struct srp_device *dev = target->srp_host->srp_dev; in srp_alloc_fmr_pool()
450 struct srp_device *dev = target->srp_host->srp_dev; in srp_alloc_fr_pool()
494 struct srp_device *dev = target->srp_host->srp_dev; in srp_create_ch_ib()
603 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_ch_ib()
680 target->srp_host->srp_dev->dev, in srp_lookup_path()
786 &target->srp_host->srp_dev->dev->node_guid, 8); in srp_send_req()
833 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_req_data()
863 struct srp_device *srp_dev = target->srp_host->srp_dev; in srp_alloc_req_data() local
864 struct ib_device *ibdev = srp_dev->dev; in srp_alloc_req_data()
881 if (srp_dev->use_fast_reg) in srp_alloc_req_data()
885 req->map_page = kmalloc(srp_dev->max_pages_per_mr * in srp_alloc_req_data()
1058 struct srp_device *dev = target->srp_host->srp_dev; in srp_unmap_data()
1296 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_finish_fr()
1347 ret = target->srp_host->srp_dev->use_fast_reg ? in srp_finish_mapping()
1374 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_sg_entry()
1459 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_sg()
1534 dev = target->srp_host->srp_dev; in srp_map_data()
1785 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_response_common()
1850 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_handle_recv()
2022 dev = target->srp_host->srp_dev->dev; in srp_queuecommand()
2417 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_send_tsk_mgmt()
2651 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); in show_local_ib_device()
2780 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) in srp_add_target()
3139 struct srp_device *srp_dev = host->srp_dev; in srp_create_target() local
3140 struct ib_device *ibdev = srp_dev->dev; in srp_create_target()
3160 target->lkey = host->srp_dev->mr->lkey; in srp_create_target()
3161 target->rkey = host->srp_dev->mr->rkey; in srp_create_target()
3196 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && in srp_create_target()
3334 return sprintf(buf, "%s\n", host->srp_dev->dev->name); in show_ibdev()
3361 host->srp_dev = device; in srp_add_port()
3390 struct srp_device *srp_dev; in srp_add_one() local
3405 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); in srp_add_one()
3406 if (!srp_dev) in srp_add_one()
3409 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr && in srp_add_one()
3411 srp_dev->has_fr = (dev_attr->device_cap_flags & in srp_add_one()
3413 if (!srp_dev->has_fmr && !srp_dev->has_fr) in srp_add_one()
3416 srp_dev->use_fast_reg = (srp_dev->has_fr && in srp_add_one()
3417 (!srp_dev->has_fmr || prefer_fr)); in srp_add_one()
3425 srp_dev->mr_page_size = 1 << mr_page_shift; in srp_add_one()
3426 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); in srp_add_one()
3428 do_div(max_pages_per_mr, srp_dev->mr_page_size); in srp_add_one()
3429 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, in srp_add_one()
3431 if (srp_dev->use_fast_reg) { in srp_add_one()
3432 srp_dev->max_pages_per_mr = in srp_add_one()
3433 min_t(u32, srp_dev->max_pages_per_mr, in srp_add_one()
3436 srp_dev->mr_max_size = srp_dev->mr_page_size * in srp_add_one()
3437 srp_dev->max_pages_per_mr; in srp_add_one()
3441 srp_dev->max_pages_per_mr, srp_dev->mr_max_size); in srp_add_one()
3443 INIT_LIST_HEAD(&srp_dev->dev_list); in srp_add_one()
3445 srp_dev->dev = device; in srp_add_one()
3446 srp_dev->pd = ib_alloc_pd(device); in srp_add_one()
3447 if (IS_ERR(srp_dev->pd)) in srp_add_one()
3450 srp_dev->mr = ib_get_dma_mr(srp_dev->pd, in srp_add_one()
3454 if (IS_ERR(srp_dev->mr)) in srp_add_one()
3466 host = srp_add_port(srp_dev, p); in srp_add_one()
3468 list_add_tail(&host->list, &srp_dev->dev_list); in srp_add_one()
3471 ib_set_client_data(device, &srp_client, srp_dev); in srp_add_one()
3476 ib_dealloc_pd(srp_dev->pd); in srp_add_one()
3479 kfree(srp_dev); in srp_add_one()
3487 struct srp_device *srp_dev; in srp_remove_one() local
3491 srp_dev = ib_get_client_data(device, &srp_client); in srp_remove_one()
3492 if (!srp_dev) in srp_remove_one()
3495 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { in srp_remove_one()
3520 ib_dereg_mr(srp_dev->mr); in srp_remove_one()
3521 ib_dealloc_pd(srp_dev->pd); in srp_remove_one()
3523 kfree(srp_dev); in srp_remove_one()
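
Note: the functions in this listing appear to belong to the SRP initiator driver (drivers/infiniband/ulp/srp/ib_srp.c). Every reference goes through a per-HCA struct srp_device that srp_add_one() allocates and registers as IB client data and that srp_remove_one() looks up and frees again; srp_host ports hang off it via dev_list, and targets reach it as target->srp_host->srp_dev. As a rough guide to what the structure must contain, the sketch below is reconstructed purely from the fields touched in the references above; the field types, ordering, and comments are assumptions drawn from those usage sites, not the driver's authoritative declaration.

    /* Sketch of the per-HCA state implied by the references above.
     * Reconstructed from usage only; see the driver's own header for
     * the real declaration.
     */
    #include <linux/list.h>
    #include <linux/types.h>
    #include <rdma/ib_verbs.h>

    struct srp_device {
            struct list_head  dev_list;         /* srp_host ports, walked in srp_remove_one() (line 3495) */
            struct ib_device *dev;              /* underlying HCA, used for DMA mapping, CM IDs, pkey lookup */
            struct ib_pd     *pd;               /* protection domain from ib_alloc_pd() (line 3446) */
            struct ib_mr     *mr;               /* global DMA MR from ib_get_dma_mr(); lkey/rkey cached per target (lines 3160-3161) */
            u64               mr_page_mask;     /* ~((u64)mr_page_size - 1) (line 3426) */
            int               mr_page_size;     /* 1 << mr_page_shift (line 3425) */
            int               mr_max_size;      /* mr_page_size * max_pages_per_mr (lines 3436-3437) */
            int               max_pages_per_mr; /* capped at SRP_MAX_PAGES_PER_MR (line 3429) */
            bool              has_fmr;          /* device provides the FMR verbs (alloc_fmr/dealloc_fmr/...) */
            bool              has_fr;           /* device_cap_flags advertises fast registration */
            bool              use_fast_reg;     /* has_fr && (!has_fmr || prefer_fr) (lines 3416-3417) */
    };

The has_fmr/has_fr/use_fast_reg split set up at lines 3409-3417 is what the per-target code keys off later: srp_alloc_fmr_pool() and srp_alloc_fr_pool() build the matching registration pool, and the use_fast_reg checks in srp_alloc_req_data() and srp_finish_mapping() select one of the two memory-registration paths per request.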