Lines Matching refs:mdev

137 static int mthca_tune_pci(struct mthca_dev *mdev)  in mthca_tune_pci()  argument
143 if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) { in mthca_tune_pci()
144 if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) { in mthca_tune_pci()
145 mthca_err(mdev, "Couldn't set PCI-X max read count, " in mthca_tune_pci()
149 } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) in mthca_tune_pci()
150 mthca_info(mdev, "No PCI-X capability, not setting RBC.\n"); in mthca_tune_pci()
152 if (pci_is_pcie(mdev->pdev)) { in mthca_tune_pci()
153 if (pcie_set_readrq(mdev->pdev, 4096)) { in mthca_tune_pci()
154 mthca_err(mdev, "Couldn't write PCI Express read request, " in mthca_tune_pci()
158 } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE) in mthca_tune_pci()
159 mthca_info(mdev, "No PCI Express capability, " in mthca_tune_pci()
165 static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) in mthca_dev_lim() argument
169 mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8; in mthca_dev_lim()
170 err = mthca_QUERY_DEV_LIM(mdev, dev_lim); in mthca_dev_lim()
172 mthca_err(mdev, "QUERY_DEV_LIM command returned %d" in mthca_dev_lim()
177 mthca_err(mdev, "HCA minimum page size of %d bigger than " in mthca_dev_lim()
183 mthca_err(mdev, "HCA has %d ports, but we only support %d, " in mthca_dev_lim()
189 if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) { in mthca_dev_lim()
190 mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than " in mthca_dev_lim()
193 (unsigned long long)pci_resource_len(mdev->pdev, 2)); in mthca_dev_lim()
197 mdev->limits.num_ports = dev_lim->num_ports; in mthca_dev_lim()
198 mdev->limits.vl_cap = dev_lim->max_vl; in mthca_dev_lim()
199 mdev->limits.mtu_cap = dev_lim->max_mtu; in mthca_dev_lim()
200 mdev->limits.gid_table_len = dev_lim->max_gids; in mthca_dev_lim()
201 mdev->limits.pkey_table_len = dev_lim->max_pkeys; in mthca_dev_lim()
202 mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay; in mthca_dev_lim()
208 mdev->limits.max_sg = min_t(int, dev_lim->max_sg, in mthca_dev_lim()
211 (mthca_is_memfree(mdev) ? in mthca_dev_lim()
215 mdev->limits.max_wqes = dev_lim->max_qp_sz; in mthca_dev_lim()
216 mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp; in mthca_dev_lim()
217 mdev->limits.reserved_qps = dev_lim->reserved_qps; in mthca_dev_lim()
218 mdev->limits.max_srq_wqes = dev_lim->max_srq_sz; in mthca_dev_lim()
219 mdev->limits.reserved_srqs = dev_lim->reserved_srqs; in mthca_dev_lim()
220 mdev->limits.reserved_eecs = dev_lim->reserved_eecs; in mthca_dev_lim()
221 mdev->limits.max_desc_sz = dev_lim->max_desc_sz; in mthca_dev_lim()
222 mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev); in mthca_dev_lim()
228 mdev->limits.max_cqes = dev_lim->max_cq_sz - 1; in mthca_dev_lim()
229 mdev->limits.reserved_cqs = dev_lim->reserved_cqs; in mthca_dev_lim()
230 mdev->limits.reserved_eqs = dev_lim->reserved_eqs; in mthca_dev_lim()
231 mdev->limits.reserved_mtts = dev_lim->reserved_mtts; in mthca_dev_lim()
232 mdev->limits.reserved_mrws = dev_lim->reserved_mrws; in mthca_dev_lim()
233 mdev->limits.reserved_uars = dev_lim->reserved_uars; in mthca_dev_lim()
234 mdev->limits.reserved_pds = dev_lim->reserved_pds; in mthca_dev_lim()
235 mdev->limits.port_width_cap = dev_lim->max_port_width; in mthca_dev_lim()
236 mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1); in mthca_dev_lim()
237 mdev->limits.flags = dev_lim->flags; in mthca_dev_lim()
245 mdev->limits.stat_rate_support = dev_lim->stat_rate_support; in mthca_dev_lim()
246 else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) in mthca_dev_lim()
247 mdev->limits.stat_rate_support = 0xf; in mthca_dev_lim()
249 mdev->limits.stat_rate_support = 0x3; in mthca_dev_lim()
258 mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | in mthca_dev_lim()
264 mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; in mthca_dev_lim()
267 mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; in mthca_dev_lim()
270 mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI; in mthca_dev_lim()
273 mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; in mthca_dev_lim()
276 mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; in mthca_dev_lim()
279 mdev->mthca_flags |= MTHCA_FLAG_SRQ; in mthca_dev_lim()
281 if (mthca_is_memfree(mdev)) in mthca_dev_lim()
283 mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; in mthca_dev_lim()
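Most of mthca_dev_lim() copies firmware-reported limits straight into mdev->limits; the one subtlety visible above is the min_t() clamp on max_sg, which caps the firmware value by what a work-request descriptor can actually hold. A small sketch of that clamp, with hypothetical header and segment sizes standing in for the driver's WQE layout constants:

#include <linux/kernel.h>       /* min_t() */

/* Hypothetical sizes; the real driver derives these from its WQE format. */
#define WQE_HEADER_SIZE         64
#define DATA_SEG_SIZE           16

/* Never trust the raw firmware value if the descriptor cannot express it. */
static int clamp_max_sg(int fw_max_sg, int fw_max_desc_sz)
{
        return min_t(int, fw_max_sg,
                     (fw_max_desc_sz - WQE_HEADER_SIZE) / DATA_SEG_SIZE);
}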
288 static int mthca_init_tavor(struct mthca_dev *mdev) in mthca_init_tavor() argument
296 err = mthca_SYS_EN(mdev); in mthca_init_tavor()
298 mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err); in mthca_init_tavor()
302 err = mthca_QUERY_FW(mdev); in mthca_init_tavor()
304 mthca_err(mdev, "QUERY_FW command returned %d," in mthca_init_tavor()
308 err = mthca_QUERY_DDR(mdev); in mthca_init_tavor()
310 mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err); in mthca_init_tavor()
314 err = mthca_dev_lim(mdev, &dev_lim); in mthca_init_tavor()
316 mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err); in mthca_init_tavor()
323 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_init_tavor()
326 size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); in mthca_init_tavor()
332 err = mthca_INIT_HCA(mdev, &init_hca); in mthca_init_tavor()
334 mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); in mthca_init_tavor()
341 mthca_SYS_DIS(mdev); in mthca_init_tavor()
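mthca_init_tavor() issues a fixed firmware command sequence (SYS_EN, QUERY_FW, QUERY_DDR, QUERY_DEV_LIM, then INIT_HCA with the computed profile) and runs SYS_DIS if anything after SYS_EN fails. A self-contained sketch of that undo-on-error shape, with the command wrappers stubbed out as hypothetical hca_*() helpers:

/* Stubs standing in for the firmware command wrappers. */
static int hca_sys_en(void *dev)   { return 0; }
static int hca_query_fw(void *dev) { return 0; }
static int hca_init_hca(void *dev) { return 0; }
static void hca_sys_dis(void *dev) { }

static int hca_bring_up(void *dev)
{
        int err;

        err = hca_sys_en(dev);          /* power the HCA on */
        if (err)
                return err;

        err = hca_query_fw(dev);        /* QUERY_FW / QUERY_DDR / QUERY_DEV_LIM */
        if (err)
                goto err_disable;

        err = hca_init_hca(dev);        /* INIT_HCA with the computed profile */
        if (err)
                goto err_disable;

        return 0;

err_disable:
        hca_sys_dis(dev);               /* undo SYS_EN on any later failure */
        return err;
}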
346 static int mthca_load_fw(struct mthca_dev *mdev) in mthca_load_fw() argument
352 mdev->fw.arbel.fw_icm = in mthca_load_fw()
353 mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages, in mthca_load_fw()
355 if (!mdev->fw.arbel.fw_icm) { in mthca_load_fw()
356 mthca_err(mdev, "Couldn't allocate FW area, aborting.\n"); in mthca_load_fw()
360 err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm); in mthca_load_fw()
362 mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err); in mthca_load_fw()
365 err = mthca_RUN_FW(mdev); in mthca_load_fw()
367 mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err); in mthca_load_fw()
374 mthca_UNMAP_FA(mdev); in mthca_load_fw()
377 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); in mthca_load_fw()
381 static int mthca_init_icm(struct mthca_dev *mdev, in mthca_init_icm() argument
389 err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages); in mthca_init_icm()
391 mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err); in mthca_init_icm()
395 mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n", in mthca_init_icm()
399 mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages, in mthca_init_icm()
401 if (!mdev->fw.arbel.aux_icm) { in mthca_init_icm()
402 mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n"); in mthca_init_icm()
406 err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm); in mthca_init_icm()
408 mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err); in mthca_init_icm()
412 err = mthca_map_eq_icm(mdev, init_hca->eqc_base); in mthca_init_icm()
414 mthca_err(mdev, "Failed to map EQ context memory, aborting.\n"); in mthca_init_icm()
419 mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size, in mthca_init_icm()
420 dma_get_cache_alignment()) / mdev->limits.mtt_seg_size; in mthca_init_icm()
422 mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, in mthca_init_icm()
423 mdev->limits.mtt_seg_size, in mthca_init_icm()
424 mdev->limits.num_mtt_segs, in mthca_init_icm()
425 mdev->limits.reserved_mtts, in mthca_init_icm()
427 if (!mdev->mr_table.mtt_table) { in mthca_init_icm()
428 mthca_err(mdev, "Failed to map MTT context memory, aborting.\n"); in mthca_init_icm()
433 mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base, in mthca_init_icm()
435 mdev->limits.num_mpts, in mthca_init_icm()
436 mdev->limits.reserved_mrws, in mthca_init_icm()
438 if (!mdev->mr_table.mpt_table) { in mthca_init_icm()
439 mthca_err(mdev, "Failed to map MPT context memory, aborting.\n"); in mthca_init_icm()
444 mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, in mthca_init_icm()
446 mdev->limits.num_qps, in mthca_init_icm()
447 mdev->limits.reserved_qps, in mthca_init_icm()
449 if (!mdev->qp_table.qp_table) { in mthca_init_icm()
450 mthca_err(mdev, "Failed to map QP context memory, aborting.\n"); in mthca_init_icm()
455 mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, in mthca_init_icm()
457 mdev->limits.num_qps, in mthca_init_icm()
458 mdev->limits.reserved_qps, in mthca_init_icm()
460 if (!mdev->qp_table.eqp_table) { in mthca_init_icm()
461 mthca_err(mdev, "Failed to map EQP context memory, aborting.\n"); in mthca_init_icm()
466 mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, in mthca_init_icm()
468 mdev->limits.num_qps << in mthca_init_icm()
469 mdev->qp_table.rdb_shift, 0, in mthca_init_icm()
471 if (!mdev->qp_table.rdb_table) { in mthca_init_icm()
472 mthca_err(mdev, "Failed to map RDB context memory, aborting\n"); in mthca_init_icm()
477 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, in mthca_init_icm()
479 mdev->limits.num_cqs, in mthca_init_icm()
480 mdev->limits.reserved_cqs, in mthca_init_icm()
482 if (!mdev->cq_table.table) { in mthca_init_icm()
483 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); in mthca_init_icm()
488 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { in mthca_init_icm()
489 mdev->srq_table.table = in mthca_init_icm()
490 mthca_alloc_icm_table(mdev, init_hca->srqc_base, in mthca_init_icm()
492 mdev->limits.num_srqs, in mthca_init_icm()
493 mdev->limits.reserved_srqs, in mthca_init_icm()
495 if (!mdev->srq_table.table) { in mthca_init_icm()
496 mthca_err(mdev, "Failed to map SRQ context memory, " in mthca_init_icm()
508 mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base, in mthca_init_icm()
510 mdev->limits.num_mgms + in mthca_init_icm()
511 mdev->limits.num_amgms, in mthca_init_icm()
512 mdev->limits.num_mgms + in mthca_init_icm()
513 mdev->limits.num_amgms, in mthca_init_icm()
515 if (!mdev->mcg_table.table) { in mthca_init_icm()
516 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); in mthca_init_icm()
524 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_init_icm()
525 mthca_free_icm_table(mdev, mdev->srq_table.table); in mthca_init_icm()
528 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_init_icm()
531 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_init_icm()
534 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_init_icm()
537 mthca_free_icm_table(mdev, mdev->qp_table.qp_table); in mthca_init_icm()
540 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); in mthca_init_icm()
543 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); in mthca_init_icm()
546 mthca_unmap_eq_icm(mdev); in mthca_init_icm()
549 mthca_UNMAP_ICM_AUX(mdev); in mthca_init_icm()
552 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); in mthca_init_icm()
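mthca_init_icm() allocates a chain of ICM tables (MTT, MPT, QP, EQP, RDB, CQ, SRQ, MCG) and, on failure, frees whatever was already set up in reverse order through the cascading error labels shown above. The idiom, reduced to three placeholder buffers and plain kzalloc()/kfree() so the sketch stays self-contained:

#include <linux/slab.h>
#include <linux/errno.h>

struct icm_ctx {
        void *mtt;
        void *mpt;
        void *qp;
};

static int init_tables(struct icm_ctx *ctx, size_t size)
{
        ctx->mtt = kzalloc(size, GFP_KERNEL);
        if (!ctx->mtt)
                return -ENOMEM;

        ctx->mpt = kzalloc(size, GFP_KERNEL);
        if (!ctx->mpt)
                goto err_free_mtt;

        ctx->qp = kzalloc(size, GFP_KERNEL);
        if (!ctx->qp)
                goto err_free_mpt;

        return 0;

        /* Unwind strictly in reverse allocation order. */
err_free_mpt:
        kfree(ctx->mpt);
err_free_mtt:
        kfree(ctx->mtt);
        return -ENOMEM;
}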
557 static void mthca_free_icms(struct mthca_dev *mdev) in mthca_free_icms() argument
560 mthca_free_icm_table(mdev, mdev->mcg_table.table); in mthca_free_icms()
561 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_free_icms()
562 mthca_free_icm_table(mdev, mdev->srq_table.table); in mthca_free_icms()
563 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_free_icms()
564 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_free_icms()
565 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_free_icms()
566 mthca_free_icm_table(mdev, mdev->qp_table.qp_table); in mthca_free_icms()
567 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); in mthca_free_icms()
568 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); in mthca_free_icms()
569 mthca_unmap_eq_icm(mdev); in mthca_free_icms()
571 mthca_UNMAP_ICM_AUX(mdev); in mthca_free_icms()
572 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); in mthca_free_icms()
575 static int mthca_init_arbel(struct mthca_dev *mdev) in mthca_init_arbel() argument
583 err = mthca_QUERY_FW(mdev); in mthca_init_arbel()
585 mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err); in mthca_init_arbel()
589 err = mthca_ENABLE_LAM(mdev); in mthca_init_arbel()
591 mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n"); in mthca_init_arbel()
592 mdev->mthca_flags |= MTHCA_FLAG_NO_LAM; in mthca_init_arbel()
594 mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err); in mthca_init_arbel()
598 err = mthca_load_fw(mdev); in mthca_init_arbel()
600 mthca_err(mdev, "Loading FW returned %d, aborting.\n", err); in mthca_init_arbel()
604 err = mthca_dev_lim(mdev, &dev_lim); in mthca_init_arbel()
606 mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err); in mthca_init_arbel()
613 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) in mthca_init_arbel()
616 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); in mthca_init_arbel()
622 err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size); in mthca_init_arbel()
626 err = mthca_INIT_HCA(mdev, &init_hca); in mthca_init_arbel()
628 mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); in mthca_init_arbel()
635 mthca_free_icms(mdev); in mthca_init_arbel()
638 mthca_UNMAP_FA(mdev); in mthca_init_arbel()
639 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); in mthca_init_arbel()
642 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) in mthca_init_arbel()
643 mthca_DISABLE_LAM(mdev); in mthca_init_arbel()
648 static void mthca_close_hca(struct mthca_dev *mdev) in mthca_close_hca() argument
650 mthca_CLOSE_HCA(mdev, 0); in mthca_close_hca()
652 if (mthca_is_memfree(mdev)) { in mthca_close_hca()
653 mthca_free_icms(mdev); in mthca_close_hca()
655 mthca_UNMAP_FA(mdev); in mthca_close_hca()
656 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); in mthca_close_hca()
658 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) in mthca_close_hca()
659 mthca_DISABLE_LAM(mdev); in mthca_close_hca()
661 mthca_SYS_DIS(mdev); in mthca_close_hca()
664 static int mthca_init_hca(struct mthca_dev *mdev) in mthca_init_hca() argument
669 if (mthca_is_memfree(mdev)) in mthca_init_hca()
670 err = mthca_init_arbel(mdev); in mthca_init_hca()
672 err = mthca_init_tavor(mdev); in mthca_init_hca()
677 err = mthca_QUERY_ADAPTER(mdev, &adapter); in mthca_init_hca()
679 mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err); in mthca_init_hca()
683 mdev->eq_table.inta_pin = adapter.inta_pin; in mthca_init_hca()
684 if (!mthca_is_memfree(mdev)) in mthca_init_hca()
685 mdev->rev_id = adapter.revision_id; in mthca_init_hca()
686 memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); in mthca_init_hca()
691 mthca_close_hca(mdev); in mthca_init_hca()
852 static int mthca_enable_msi_x(struct mthca_dev *mdev) in mthca_enable_msi_x() argument
861 err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries)); in mthca_enable_msi_x()
865 mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector; in mthca_enable_msi_x()
866 mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector; in mthca_enable_msi_x()
867 mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = entries[2].vector; in mthca_enable_msi_x()
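mthca_enable_msi_x() asks for exactly three MSI-X vectors (completion, async-event, and command EQs); if pci_enable_msix_exact() fails, the caller simply stays on a legacy interrupt. A minimal sketch of the same request, copying the granted vectors into a caller-supplied array (the helper name is made up):

#include <linux/kernel.h>       /* ARRAY_SIZE() */
#include <linux/pci.h>

static int enable_three_msix(struct pci_dev *pdev, u32 vectors[3])
{
        struct msix_entry entries[] = {
                { .entry = 0 },         /* completion EQ  */
                { .entry = 1 },         /* async event EQ */
                { .entry = 2 },         /* command EQ     */
        };
        int err, i;

        err = pci_enable_msix_exact(pdev, entries, ARRAY_SIZE(entries));
        if (err)
                return err;             /* caller falls back to a legacy interrupt */

        for (i = 0; i < ARRAY_SIZE(entries); ++i)
                vectors[i] = entries[i].vector;

        return 0;
}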
904 struct mthca_dev *mdev; in __mthca_init_one() local
967 mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev); in __mthca_init_one()
968 if (!mdev) { in __mthca_init_one()
975 mdev->pdev = pdev; in __mthca_init_one()
977 mdev->mthca_flags = mthca_hca_table[hca_type].flags; in __mthca_init_one()
979 mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; in __mthca_init_one()
986 err = mthca_reset(mdev); in __mthca_init_one()
988 mthca_err(mdev, "Failed to reset HCA, aborting.\n"); in __mthca_init_one()
992 if (mthca_cmd_init(mdev)) { in __mthca_init_one()
993 mthca_err(mdev, "Failed to init command interface, aborting.\n"); in __mthca_init_one()
997 err = mthca_tune_pci(mdev); in __mthca_init_one()
1001 err = mthca_init_hca(mdev); in __mthca_init_one()
1005 if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { in __mthca_init_one()
1006 mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n", in __mthca_init_one()
1007 (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, in __mthca_init_one()
1008 (int) (mdev->fw_ver & 0xffff), in __mthca_init_one()
1012 mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); in __mthca_init_one()
1015 if (msi_x && !mthca_enable_msi_x(mdev)) in __mthca_init_one()
1016 mdev->mthca_flags |= MTHCA_FLAG_MSI_X; in __mthca_init_one()
1018 err = mthca_setup_hca(mdev); in __mthca_init_one()
1019 if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) { in __mthca_init_one()
1020 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) in __mthca_init_one()
1022 mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X; in __mthca_init_one()
1024 err = mthca_setup_hca(mdev); in __mthca_init_one()
1030 err = mthca_register_device(mdev); in __mthca_init_one()
1034 err = mthca_create_agents(mdev); in __mthca_init_one()
1038 pci_set_drvdata(pdev, mdev); in __mthca_init_one()
1039 mdev->hca_type = hca_type; in __mthca_init_one()
1041 mdev->active = true; in __mthca_init_one()
1046 mthca_unregister_device(mdev); in __mthca_init_one()
1049 mthca_cleanup_mcg_table(mdev); in __mthca_init_one()
1050 mthca_cleanup_av_table(mdev); in __mthca_init_one()
1051 mthca_cleanup_qp_table(mdev); in __mthca_init_one()
1052 mthca_cleanup_srq_table(mdev); in __mthca_init_one()
1053 mthca_cleanup_cq_table(mdev); in __mthca_init_one()
1054 mthca_cmd_use_polling(mdev); in __mthca_init_one()
1055 mthca_cleanup_eq_table(mdev); in __mthca_init_one()
1057 mthca_pd_free(mdev, &mdev->driver_pd); in __mthca_init_one()
1059 mthca_cleanup_mr_table(mdev); in __mthca_init_one()
1060 mthca_cleanup_pd_table(mdev); in __mthca_init_one()
1061 mthca_cleanup_uar_table(mdev); in __mthca_init_one()
1064 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) in __mthca_init_one()
1067 mthca_close_hca(mdev); in __mthca_init_one()
1070 mthca_cmd_cleanup(mdev); in __mthca_init_one()
1073 ib_dealloc_device(&mdev->ib_dev); in __mthca_init_one()
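Late in the probe path above, the err == -EBUSY branch retries without MSI-X: if mthca_setup_hca() fails with -EBUSY while MSI-X is enabled, the driver disables MSI-X and calls it again with a legacy interrupt. A sketch of that fallback, with hypothetical stubs standing in for mthca_enable_msi_x() and mthca_setup_hca():

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical stubs for mthca_enable_msi_x() and mthca_setup_hca(). */
static int try_enable_msix(struct pci_dev *pdev) { return -ENOSYS; }
static int setup_hw(struct pci_dev *pdev)        { return 0; }

static int probe_with_msix_fallback(struct pci_dev *pdev, bool want_msix)
{
        bool using_msix = want_msix && !try_enable_msix(pdev);
        int err;

        err = setup_hw(pdev);
        if (err == -EBUSY && using_msix) {
                /* Setup failed under MSI-X: drop back to a legacy
                 * interrupt and retry the setup once.
                 */
                pci_disable_msix(pdev);
                using_msix = false;
                err = setup_hw(pdev);
        }

        return err;
}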
1086 struct mthca_dev *mdev = pci_get_drvdata(pdev); in __mthca_remove_one() local
1089 if (mdev) { in __mthca_remove_one()
1090 mthca_free_agents(mdev); in __mthca_remove_one()
1091 mthca_unregister_device(mdev); in __mthca_remove_one()
1093 for (p = 1; p <= mdev->limits.num_ports; ++p) in __mthca_remove_one()
1094 mthca_CLOSE_IB(mdev, p); in __mthca_remove_one()
1096 mthca_cleanup_mcg_table(mdev); in __mthca_remove_one()
1097 mthca_cleanup_av_table(mdev); in __mthca_remove_one()
1098 mthca_cleanup_qp_table(mdev); in __mthca_remove_one()
1099 mthca_cleanup_srq_table(mdev); in __mthca_remove_one()
1100 mthca_cleanup_cq_table(mdev); in __mthca_remove_one()
1101 mthca_cmd_use_polling(mdev); in __mthca_remove_one()
1102 mthca_cleanup_eq_table(mdev); in __mthca_remove_one()
1104 mthca_pd_free(mdev, &mdev->driver_pd); in __mthca_remove_one()
1106 mthca_cleanup_mr_table(mdev); in __mthca_remove_one()
1107 mthca_cleanup_pd_table(mdev); in __mthca_remove_one()
1109 iounmap(mdev->kar); in __mthca_remove_one()
1110 mthca_uar_free(mdev, &mdev->driver_uar); in __mthca_remove_one()
1111 mthca_cleanup_uar_table(mdev); in __mthca_remove_one()
1112 mthca_close_hca(mdev); in __mthca_remove_one()
1113 mthca_cmd_cleanup(mdev); in __mthca_remove_one()
1115 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) in __mthca_remove_one()
1118 ib_dealloc_device(&mdev->ib_dev); in __mthca_remove_one()
1127 struct mthca_dev *mdev; in __mthca_restart_one() local
1130 mdev = pci_get_drvdata(pdev); in __mthca_restart_one()
1131 if (!mdev) in __mthca_restart_one()
1133 hca_type = mdev->hca_type; in __mthca_restart_one()