/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_mr.c | 42 struct qib_mregion mr; /* must be last */ member 50 static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, in init_qib_mregion() argument 58 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); in init_qib_mregion() 59 if (!mr->map[i]) in init_qib_mregion() 62 mr->mapsz = m; in init_qib_mregion() 63 init_completion(&mr->comp); in init_qib_mregion() 65 atomic_set(&mr->refcount, 1); in init_qib_mregion() 66 mr->pd = pd; in init_qib_mregion() 67 mr->max_segs = count; in init_qib_mregion() 72 kfree(mr->map[--i]); in init_qib_mregion() [all …]
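The init_qib_mregion() fragments above show the usual allocate-then-unwind pattern for the per-region map pages. Below is a minimal standalone C sketch of that pattern; the struct layout, the fixed map array size, and the plain int refcount are simplifications (the driver uses atomic_t plus a completion), not the real qib types.

#include <stdlib.h>
#include <string.h>

struct seg_map { void *segs; };              /* stand-in for the per-map segment array */

struct mregion {
	int mapsz;
	int refcount;                        /* the driver uses atomic_t + a completion */
	size_t max_segs;
	struct seg_map *map[8];              /* fixed size here; sized from 'count' in the driver */
};

static int init_mregion(struct mregion *mr, size_t count, int nmaps)
{
	int i;

	if (nmaps > 8)
		return -1;
	memset(mr, 0, sizeof(*mr));
	for (i = 0; i < nmaps; i++) {
		mr->map[i] = calloc(1, sizeof(*mr->map[0]));
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = nmaps;
	mr->refcount = 1;
	mr->max_segs = count;
	return 0;

bail:
	while (i)                            /* free only what was allocated */
		free(mr->map[--i]);
	return -1;
}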
|
D | qib_keys.c | 49 int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) in qib_alloc_lkey() argument 55 struct qib_ibdev *dev = to_idev(mr->pd->device); in qib_alloc_lkey() 66 qib_get_mr(mr); in qib_alloc_lkey() 67 rcu_assign_pointer(dev->dma_mr, mr); in qib_alloc_lkey() 68 mr->lkey_published = 1; in qib_alloc_lkey() 93 mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | in qib_alloc_lkey() 96 if (mr->lkey == 0) { in qib_alloc_lkey() 97 mr->lkey |= 1 << 8; in qib_alloc_lkey() 100 qib_get_mr(mr); in qib_alloc_lkey() 101 rcu_assign_pointer(rkt->table[r], mr); in qib_alloc_lkey() [all …]
|
D | qib_verbs.c | 187 qib_put_mr(sge->mr); in qib_copy_sge() 190 } else if (sge->length == 0 && sge->mr->lkey) { in qib_copy_sge() 192 if (++sge->m >= sge->mr->mapsz) in qib_copy_sge() 197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_sge() 199 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_sge() 228 qib_put_mr(sge->mr); in qib_skip_sge() 231 } else if (sge->length == 0 && sge->mr->lkey) { in qib_skip_sge() 233 if (++sge->m >= sge->mr->mapsz) in qib_skip_sge() 238 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_skip_sge() 240 sge->mr->map[sge->m]->segs[sge->n].length; in qib_skip_sge() [all …]
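qib_copy_sge() and qib_skip_sge() advance an SGE through a region the same way: when the current segment is used up, step to the next entry of the current map page and roll over to the next map page when the entry index wraps. A simplified standalone sketch of that two-level walk follows; SEGSZ, the struct layouts and the array sizes are illustrative stand-ins for the driver's QIB_SEGSZ and qib_* types.

#define SEGSZ 8                                /* entries per map page (illustrative) */

struct seg { void *vaddr; unsigned int length; };
struct seg_map { struct seg segs[SEGSZ]; };
struct mregion { int mapsz; struct seg_map *map[16]; };

struct sge_state {
	struct mregion *mr;
	void *vaddr;
	unsigned int length;
	int m, n;                              /* map index, segment index */
};

/* Called once the caller has consumed the current segment (sge->length == 0). */
static void sge_advance(struct sge_state *sge)
{
	if (++sge->n >= SEGSZ) {
		sge->n = 0;
		if (++sge->m >= sge->mr->mapsz)
			return;                /* ran off the end of the region */
	}
	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
}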
|
D | qib_verbs.h | 320 struct qib_mregion *mr; member 332 struct qib_mregion mr; /* must be last */ member 979 int qib_alloc_lkey(struct qib_mregion *mr, int dma_region); 981 void qib_free_lkey(struct qib_mregion *mr); 1053 static inline void qib_get_mr(struct qib_mregion *mr) in qib_get_mr() argument 1055 atomic_inc(&mr->refcount); in qib_get_mr() 1060 static inline void qib_put_mr(struct qib_mregion *mr) in qib_put_mr() argument 1062 if (unlikely(atomic_dec_and_test(&mr->refcount))) in qib_put_mr() 1063 call_rcu(&mr->list, mr_rcu_callback); in qib_put_mr() 1069 qib_put_mr(ss->sge.mr); in qib_put_ss()
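qib_get_mr()/qib_put_mr() define the lifetime rule for these regions: lookups take a reference while inside an RCU read-side section, and the final put frees through call_rcu() so readers that already fetched the pointer from rkt->table[] or dev->dma_mr can finish. A hedged, kernel-style sketch of a reader following that rule; the field and type names follow the fragments quoted in this listing, and this shows only the shape of the lookup, not the literal qib_lkey_ok().

/* Sketch: RCU-protected lkey lookup that pairs with qib_get_mr()/qib_put_mr(). */
static struct qib_mregion *lookup_mr(struct qib_lkey_table *rkt, u32 lkey)
{
	struct qib_mregion *mr;

	rcu_read_lock();
	mr = rcu_dereference(rkt->table[lkey >> (32 - ib_qib_lkey_table_size)]);
	if (!mr || mr->lkey != lkey) {
		rcu_read_unlock();
		return NULL;
	}
	qib_get_mr(mr);          /* hold a reference beyond the RCU section */
	rcu_read_unlock();
	return mr;               /* caller drops it later with qib_put_mr() */
}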
|
D | qib_rc.c | 97 if (e->rdma_sge.mr) { in qib_make_rc_ack() 98 qib_put_mr(e->rdma_sge.mr); in qib_make_rc_ack() 99 e->rdma_sge.mr = NULL; in qib_make_rc_ack() 129 if (len && !e->rdma_sge.mr) { in qib_make_rc_ack() 134 qp->s_rdma_mr = e->rdma_sge.mr; in qib_make_rc_ack() 173 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; in qib_make_rc_ack() 1015 qib_put_mr(sge->mr); in qib_rc_send_complete() 1071 qib_put_mr(sge->mr); in do_rc_completion() 1732 if (e->rdma_sge.mr) { in qib_rc_rcv_error() 1733 qib_put_mr(e->rdma_sge.mr); in qib_rc_rcv_error() [all …]
|
D | qib_ruc.c | 113 qib_put_mr(sge->mr); in qib_init_sge() 504 qib_put_mr(qp->r_sge.sge.mr); in qib_ruc_loopback() 528 qib_put_mr(sge->mr); in qib_ruc_loopback() 531 } else if (sge->length == 0 && sge->mr->lkey) { in qib_ruc_loopback() 533 if (++sge->m >= sge->mr->mapsz) in qib_ruc_loopback() 538 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ruc_loopback() 540 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ruc_loopback() 782 qib_put_mr(sge->mr); in qib_send_complete()
|
D | qib_ud.c | 191 } else if (sge->length == 0 && sge->mr->lkey) { in qib_ud_loopback() 193 if (++sge->m >= sge->mr->mapsz) in qib_ud_loopback() 198 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ud_loopback() 200 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ud_loopback()
|
D | qib_sdma.c | 623 } else if (sge->length == 0 && sge->mr->lkey) { in qib_sdma_verbs_send() 625 if (++sge->m >= sge->mr->mapsz) in qib_sdma_verbs_send() 630 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_sdma_verbs_send() 632 sge->mr->map[sge->m]->segs[sge->n].length; in qib_sdma_verbs_send()
|
D | qib_qp.c | 435 qib_put_mr(sge->mr); in clear_mr_refs() 457 e->rdma_sge.mr) { in clear_mr_refs() 458 qib_put_mr(e->rdma_sge.mr); in clear_mr_refs() 459 e->rdma_sge.mr = NULL; in clear_mr_refs()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_mr.c | 46 struct ipath_mregion mr; /* must be last */ member 65 struct ipath_mr *mr; in ipath_get_dma_mr() local 68 mr = kzalloc(sizeof *mr, GFP_KERNEL); in ipath_get_dma_mr() 69 if (!mr) { in ipath_get_dma_mr() 74 mr->mr.access_flags = acc; in ipath_get_dma_mr() 75 ret = &mr->ibmr; in ipath_get_dma_mr() 84 struct ipath_mr *mr; in alloc_mr() local 89 mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); in alloc_mr() 90 if (!mr) in alloc_mr() 95 mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); in alloc_mr() [all …]
|
D | ipath_keys.c | 47 int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) in ipath_alloc_lkey() argument 75 mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) | in ipath_alloc_lkey() 78 if (mr->lkey == 0) { in ipath_alloc_lkey() 79 mr->lkey |= 1 << 8; in ipath_alloc_lkey() 82 rkt->table[r] = mr; in ipath_alloc_lkey() 125 struct ipath_mregion *mr; in ipath_lkey_ok() local 142 isge->mr = NULL; in ipath_lkey_ok() 149 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; in ipath_lkey_ok() 150 if (unlikely(mr == NULL || mr->lkey != sge->lkey || in ipath_lkey_ok() 151 qp->ibqp.pd != mr->pd)) { in ipath_lkey_ok() [all …]
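ipath_alloc_lkey() packs the table slot into the top bits of the lkey and a small generation value below it; a result of 0 is nudged with bit 8 because lkey 0 is treated as the reserved DMA key. A standalone illustration of that encoding; the table width and the way the generation is masked are simplified here (the real masking sits in the elided part of the listing above).

#include <stdint.h>
#include <stdio.h>

#define LKEY_TABLE_BITS 16                    /* illustrative; the driver size is configurable */

/* Pack table slot r into the top bits and a small generation counter below it. */
static uint32_t make_lkey(uint32_t r, uint32_t gen)
{
	uint32_t lkey = (r << (32 - LKEY_TABLE_BITS)) | (gen << 8);

	if (lkey == 0)                        /* 0 is the reserved "DMA region" key */
		lkey |= 1 << 8;
	return lkey;
}

int main(void)
{
	printf("slot 5, gen 3 -> lkey 0x%08x\n", make_lkey(5, 3));
	return 0;
}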
|
D | ipath_verbs.c | 190 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_copy_sge() 192 if (++sge->m >= sge->mr->mapsz) in ipath_copy_sge() 197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_copy_sge() 199 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_copy_sge() 229 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_skip_sge() 231 if (++sge->m >= sge->mr->mapsz) in ipath_skip_sge() 236 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_skip_sge() 238 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_skip_sge() 276 } else if (sge.length == 0 && sge.mr != NULL) { in ipath_count_sge() 278 if (++sge.m >= sge.mr->mapsz) in ipath_count_sge() [all …]
|
D | ipath_ud.c | 200 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_ud_loopback() 202 if (++sge->m >= sge->mr->mapsz) in ipath_ud_loopback() 207 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ud_loopback() 209 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ud_loopback()
|
D | ipath_ruc.c | 415 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_ruc_loopback() 417 if (++sge->m >= sge->mr->mapsz) in ipath_ruc_loopback() 422 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ruc_loopback() 424 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ruc_loopback()
|
D | ipath_sdma.c | 765 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_sdma_verbs_send() 767 if (++sge->m >= sge->mr->mapsz) in ipath_sdma_verbs_send() 772 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_sdma_verbs_send() 774 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_sdma_verbs_send()
|
D | ipath_verbs.h | 259 struct ipath_mregion *mr; member 271 struct ipath_mregion mr; /* must be last */ member 781 struct ipath_mregion *mr);
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ |
D | gddr5.c | 74 ram->mr[0] &= ~0xf7f; in nvkm_gddr5_calc() 75 ram->mr[0] |= (WR & 0x0f) << 8; in nvkm_gddr5_calc() 76 ram->mr[0] |= (CL & 0x0f) << 3; in nvkm_gddr5_calc() 77 ram->mr[0] |= (WL & 0x07) << 0; in nvkm_gddr5_calc() 79 ram->mr[1] &= ~0x0bf; in nvkm_gddr5_calc() 80 ram->mr[1] |= (xd & 0x01) << 7; in nvkm_gddr5_calc() 81 ram->mr[1] |= (at[0] & 0x03) << 4; in nvkm_gddr5_calc() 82 ram->mr[1] |= (dt & 0x03) << 2; in nvkm_gddr5_calc() 83 ram->mr[1] |= (ds & 0x03) << 0; in nvkm_gddr5_calc() 88 ram->mr1_nuts = ram->mr[1]; in nvkm_gddr5_calc() [all …]
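ram->mr[] holds shadow copies of the DRAM mode registers, and every field update above is a read-modify-write: clear the field bits with an AND mask, then OR in the new value shifted into place (the driver often clears several fields with one combined mask first). A tiny standalone helper expressing the same idiom; the helper name and the example mask/shift are illustrative.

#include <stdint.h>

/* Replace the field selected by 'mask' at bit position 'shift' in a shadow mode register. */
static uint32_t mr_field(uint32_t mr, unsigned int shift, uint32_t mask, uint32_t val)
{
	mr &= ~(mask << shift);
	mr |= (val & mask) << shift;
	return mr;
}

/* e.g. the CL update from nvkm_gddr5_calc() above, expressed with the helper:   */
/*     ram->mr[0] = mr_field(ram->mr[0], 3, 0x0f, CL);                           */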
|
D | gddr3.c | 88 DLL = !(ram->mr[1] & 0x1); in nvkm_gddr3_calc() 89 ODT = (ram->mr[1] & 0x004) >> 2 | in nvkm_gddr3_calc() 90 (ram->mr[1] & 0x040) >> 5 | in nvkm_gddr3_calc() 91 (ram->mr[1] & 0x200) >> 7; in nvkm_gddr3_calc() 97 hi = ram->mr[2] & 0x1; in nvkm_gddr3_calc() 103 ram->mr[0] &= ~0xf74; in nvkm_gddr3_calc() 104 ram->mr[0] |= (CWL & 0x07) << 9; in nvkm_gddr3_calc() 105 ram->mr[0] |= (CL & 0x07) << 4; in nvkm_gddr3_calc() 106 ram->mr[0] |= (CL & 0x08) >> 1; in nvkm_gddr3_calc() 108 ram->mr[1] &= ~0x3fc; in nvkm_gddr3_calc() [all …]
|
D | sddr3.c | 90 DLL = !(ram->mr[1] & 0x1); in nvkm_sddr3_calc() 91 ODT = (ram->mr[1] & 0x004) >> 2 | in nvkm_sddr3_calc() 92 (ram->mr[1] & 0x040) >> 5 | in nvkm_sddr3_calc() 93 (ram->mr[1] & 0x200) >> 7; in nvkm_sddr3_calc() 105 ram->mr[0] &= ~0xf74; in nvkm_sddr3_calc() 106 ram->mr[0] |= (WR & 0x07) << 9; in nvkm_sddr3_calc() 107 ram->mr[0] |= (CL & 0x0e) << 3; in nvkm_sddr3_calc() 108 ram->mr[0] |= (CL & 0x01) << 2; in nvkm_sddr3_calc() 110 ram->mr[1] &= ~0x245; in nvkm_sddr3_calc() 111 ram->mr[1] |= (ODT & 0x1) << 2; in nvkm_sddr3_calc() [all …]
|
D | sddr2.c | 84 ram->mr[0] &= ~0xf70; in nvkm_sddr2_calc() 85 ram->mr[0] |= (WR & 0x07) << 9; in nvkm_sddr2_calc() 86 ram->mr[0] |= (CL & 0x07) << 4; in nvkm_sddr2_calc() 88 ram->mr[1] &= ~0x045; in nvkm_sddr2_calc() 89 ram->mr[1] |= (ODT & 0x1) << 2; in nvkm_sddr2_calc() 90 ram->mr[1] |= (ODT & 0x2) << 5; in nvkm_sddr2_calc() 91 ram->mr[1] |= !DLL; in nvkm_sddr2_calc()
|
D | ramgt215.c | 427 ram_mask(fuc, mr[0], 0x100, 0x100); in nvkm_sddr2_dll_reset() 429 ram_mask(fuc, mr[0], 0x100, 0x000); in nvkm_sddr2_dll_reset() 434 nvkm_sddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr) in nvkm_sddr3_dll_disable() argument 436 u32 mr1_old = ram_rd32(fuc, mr[1]); in nvkm_sddr3_dll_disable() 440 ram_wr32(fuc, mr[1], mr[1]); in nvkm_sddr3_dll_disable() 446 nvkm_gddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr) in nvkm_gddr3_dll_disable() argument 448 u32 mr1_old = ram_rd32(fuc, mr[1]); in nvkm_gddr3_dll_disable() 451 ram_wr32(fuc, mr[1], mr[1]); in nvkm_gddr3_dll_disable() 560 ram->base.mr[0] = ram_rd32(fuc, mr[0]); in gt215_ram_calc() 561 ram->base.mr[1] = ram_rd32(fuc, mr[1]); in gt215_ram_calc() [all …]
|
D | ramgk104.c | 265 if ((ram->base.mr[1] & 0x03c) != 0x030) { in gk104_ram_calc_gddr5() 266 ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c); in gk104_ram_calc_gddr5() 267 ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000); in gk104_ram_calc_gddr5() 590 ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]); in gk104_ram_calc_gddr5() 591 ram_wr32(fuc, mr[0], ram->base.mr[0]); in gk104_ram_calc_gddr5() 592 ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]); in gk104_ram_calc_gddr5() 594 ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]); in gk104_ram_calc_gddr5() 595 ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */ in gk104_ram_calc_gddr5() 596 ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]); in gk104_ram_calc_gddr5() 597 ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]); in gk104_ram_calc_gddr5() [all …]
|
D | ramnv50.c | 161 ram_nuke(hwsq, mr[0]); /* force update */ in nv50_ram_calc() 162 ram_mask(hwsq, mr[0], 0x000, 0x000); in nv50_ram_calc() 165 ram_mask(hwsq, mr[2], 0x000, 0x000); in nv50_ram_calc() 166 ram_nuke(hwsq, mr[0]); /* force update */ in nv50_ram_calc() 167 ram_mask(hwsq, mr[0], 0x000, 0x000); in nv50_ram_calc() 192 ram_mask(hwsq, mr[0], 0x100, 0x100); in nv50_ram_calc() 193 ram_mask(hwsq, mr[0], 0x100, 0x000); in nv50_ram_calc()
|
/linux-4.1.27/drivers/scsi/ |
D | mesh.c | 304 volatile struct mesh_regs __iomem *mr = ms->mesh; in mesh_dump_regs() local 310 ms, mr, md); in mesh_dump_regs() 313 (mr->count_hi << 8) + mr->count_lo, mr->sequence, in mesh_dump_regs() 314 (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count, in mesh_dump_regs() 315 mr->exception, mr->error, mr->intr_mask, mr->interrupt, in mesh_dump_regs() 316 mr->sync_params); in mesh_dump_regs() 317 while(in_8(&mr->fifo_count)) in mesh_dump_regs() 318 printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo)); in mesh_dump_regs() 338 static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr) in mesh_flush_io() argument 340 (void)in_8(&mr->mesh_id); in mesh_flush_io() [all …]
|
D | qla1280.c | 1145 uint8_t mr; in qla1280_set_target_parameters() local 1152 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0; in qla1280_set_target_parameters() 1171 mr |= BIT_6; in qla1280_set_target_parameters() 1177 status = qla1280_mailbox_command(ha, mr, mb); in qla1280_set_target_parameters() 2459 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) in qla1280_mailbox_command() argument 2485 if (mr & BIT_0) { in qla1280_mailbox_command() 2489 mr >>= 1; in qla1280_mailbox_command() 2532 mr = MAILBOX_REGISTER_COUNT; in qla1280_mailbox_command()
|
D | NCR5380.c | 410 unsigned char status, data, basr, mr, icr, i; in NCR5380_print() local 415 mr = NCR5380_read(MODE_REG); in NCR5380_print() 431 printk("\nMODE: %02x ", mr); in NCR5380_print() 433 if (mr & mrs[i].mask) in NCR5380_print()
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | mr.c | 56 static int clean_mr(struct mlx5_ib_mr *mr); 58 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument 60 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); in destroy_mkey() 82 struct mlx5_ib_mr *mr = context; in reg_mr_callback() local 83 struct mlx5_ib_dev *dev = mr->dev; in reg_mr_callback() 85 int c = order2idx(dev, mr->order); in reg_mr_callback() 97 kfree(mr); in reg_mr_callback() 103 if (mr->out.hdr.status) { in reg_mr_callback() 105 mr->out.hdr.status, in reg_mr_callback() 106 be32_to_cpu(mr->out.hdr.syndrome)); in reg_mr_callback() [all …]
|
D | odp.c | 49 struct mlx5_ib_mr *mr; in mlx5_ib_invalidate_range() local 60 mr = umem->odp_data->private; in mlx5_ib_invalidate_range() 62 if (!mr || !mr->ibmr.pd) in mlx5_ib_invalidate_range() 93 mlx5_ib_update_mtt(mr, blk_start_idx, in mlx5_ib_invalidate_range() 100 mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1, in mlx5_ib_invalidate_range() 153 struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr); in mlx5_ib_odp_find_mr_lkey() local 155 if (!mmr || mmr->key != key || !mr->live) in mlx5_ib_odp_find_mr_lkey() 195 struct mlx5_ib_mr *mr; in pagefault_single_data_segment() local 199 mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key); in pagefault_single_data_segment() 205 if (!mr || !mr->ibmr.pd) { in pagefault_single_data_segment() [all …]
|
D | main.c | 612 struct mlx5_core_mr mr; in alloc_pa_mkey() local 625 err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), in alloc_pa_mkey() 633 *key = mr.key; in alloc_pa_mkey() 645 struct mlx5_core_mr mr; in free_pa_mkey() local 648 memset(&mr, 0, sizeof(mr)); in free_pa_mkey() 649 mr.key = key; in free_pa_mkey() 650 err = mlx5_core_destroy_mkey(dev->mdev, &mr); in free_pa_mkey() 958 ib_dereg_mr(dev->umrc.mr); in destroy_umrc_res() 973 struct ib_mr *mr; in create_umr_res() local 990 mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE); in create_umr_res() [all …]
|
D | Makefile | 3 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
|
D | cq.c | 413 struct mlx5_ib_mr *mr; in mlx5_poll_one() local 527 mr = to_mibmr(mmr); in mlx5_poll_one() 528 get_sig_err_item(sig_err_cqe, &mr->sig->err_item); in mlx5_poll_one() 529 mr->sig->sig_err_exists = true; in mlx5_poll_one() 530 mr->sig->sigerr_count++; in mlx5_poll_one() 533 cq->mcq.cqn, mr->sig->err_item.key, in mlx5_poll_one() 534 mr->sig->err_item.err_type, in mlx5_poll_one() 535 mr->sig->err_item.sig_err_offset, in mlx5_poll_one() 536 mr->sig->err_item.expected, in mlx5_poll_one() 537 mr->sig->err_item.actual); in mlx5_poll_one()
|
D | mlx5_ib.h | 352 struct ib_mr *mr; member 366 struct mlx5_core_mr mr; member 571 int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
D | hvCall.S | 65 mr r4,r3; \ 66 mr r3,r0; \ 78 mr r5,BUFREG; \ 140 mr r4,r5 141 mr r5,r6 142 mr r6,r7 143 mr r7,r8 144 mr r8,r9 145 mr r9,r10 165 mr r0,r4 [all …]
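In the PowerPC assembly files in this part of the listing, mr is not a memory-region variable: it is the "move register" mnemonic, mr rD,rS, which copies rS into rD (it assembles as or rD,rS,rS). A minimal inline-asm sketch, compilable only for powerpc targets:

/* Copy one GPR to another with the 'mr' mnemonic (powerpc only). */
static inline unsigned long move_reg(unsigned long src)
{
	unsigned long dst;

	asm volatile("mr %0,%1" : "=r"(dst) : "r"(src));
	return dst;
}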
|
/linux-4.1.27/net/rds/ |
D | rdma.c | 71 struct rds_mr *mr; in rds_mr_tree_walk() local 75 mr = rb_entry(parent, struct rds_mr, r_rb_node); in rds_mr_tree_walk() 77 if (key < mr->r_key) in rds_mr_tree_walk() 79 else if (key > mr->r_key) in rds_mr_tree_walk() 82 return mr; in rds_mr_tree_walk() 96 static void rds_destroy_mr(struct rds_mr *mr) in rds_destroy_mr() argument 98 struct rds_sock *rs = mr->r_sock; in rds_destroy_mr() 103 mr->r_key, atomic_read(&mr->r_refcount)); in rds_destroy_mr() 105 if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state)) in rds_destroy_mr() 109 if (!RB_EMPTY_NODE(&mr->r_rb_node)) in rds_destroy_mr() [all …]
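rds_mr_tree_walk() is a standard red-black-tree lookup keyed on r_key. The lookup half of that pattern, written against the kernel rbtree API, looks roughly like the sketch below; the real function also handles inserting a new node when one is supplied.

/* Lookup-only sketch of rds_mr_tree_walk(); insertion handling omitted. */
static struct rds_mr *mr_lookup(struct rb_root *root, u64 key)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct rds_mr *mr = rb_entry(node, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			node = node->rb_left;
		else if (key > mr->r_key)
			node = node->rb_right;
		else
			return mr;
	}
	return NULL;
}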
|
D | iw.c | 94 rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd, in rds_iw_add_one() 98 if (IS_ERR(rds_iwdev->mr)) in rds_iw_add_one() 101 rds_iwdev->mr = NULL; in rds_iw_add_one() 118 if (rds_iwdev->mr) in rds_iw_add_one() 119 ib_dereg_mr(rds_iwdev->mr); in rds_iw_add_one() 149 if (rds_iwdev->mr) in rds_iw_remove_one() 150 ib_dereg_mr(rds_iwdev->mr); in rds_iw_remove_one()
|
D | iw_rdma.c | 49 struct ib_mr *mr; member 627 *key_ret = ibmr->mr->rkey; 667 struct ib_mr *mr; local 670 mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size); 671 if (IS_ERR(mr)) { 672 err = PTR_ERR(mr); 686 ib_dereg_mr(mr); 691 ibmr->mr = mr; 707 ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++); 708 mapping->m_rkey = ibmr->mr->rkey; [all …]
|
D | ib.c | 102 if (rds_ibdev->mr) in rds_ib_dev_free() 103 ib_dereg_mr(rds_ibdev->mr); in rds_ib_dev_free() 167 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE); in rds_ib_add_one() 168 if (IS_ERR(rds_ibdev->mr)) { in rds_ib_add_one() 169 rds_ibdev->mr = NULL; in rds_ib_add_one()
|
D | rds.h | 749 void __rds_put_mr_final(struct rds_mr *mr); 750 static inline void rds_mr_put(struct rds_mr *mr) in rds_mr_put() argument 752 if (atomic_dec_and_test(&mr->r_refcount)) in rds_mr_put() 753 __rds_put_mr_final(mr); in rds_mr_put()
|
D | ib.h | 176 struct ib_mr *mr; member
|
D | iw.h | 184 struct ib_mr *mr; member
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | mr.c | 59 struct mlx4_ib_mr *mr; in mlx4_ib_get_dma_mr() local 62 mr = kmalloc(sizeof *mr, GFP_KERNEL); in mlx4_ib_get_dma_mr() 63 if (!mr) in mlx4_ib_get_dma_mr() 67 ~0ull, convert_access(acc), 0, 0, &mr->mmr); in mlx4_ib_get_dma_mr() 71 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr() 75 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_get_dma_mr() 76 mr->umem = NULL; in mlx4_ib_get_dma_mr() 78 return &mr->ibmr; in mlx4_ib_get_dma_mr() 81 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr() 84 kfree(mr); in mlx4_ib_get_dma_mr() [all …]
|
D | Makefile | 3 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
|
D | mad.c | 589 list.lkey = tun_ctx->mr->lkey; in mlx4_ib_send_to_slave() 1130 sg_list.lkey = ctx->mr->lkey; in mlx4_ib_post_pv_qp_buf() 1241 list.lkey = sqp_ctx->mr->lkey; in mlx4_ib_send_to_wire() 1819 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE); in create_pv_resources() 1820 if (IS_ERR(ctx->mr)) { in create_pv_resources() 1821 ret = PTR_ERR(ctx->mr); in create_pv_resources() 1869 ib_dereg_mr(ctx->mr); in create_pv_resources() 1870 ctx->mr = NULL; in create_pv_resources() 1908 ib_dereg_mr(ctx->mr); in destroy_pv_resources() 1909 ctx->mr = NULL; in destroy_pv_resources() [all …]
|
D | mlx4_ib.h | 417 struct ib_mr *mr; member 658 int mlx4_ib_dereg_mr(struct ib_mr *mr); 814 int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
|
/linux-4.1.27/arch/x86/mm/ |
D | init.c | 186 static int __meminit save_mr(struct map_range *mr, int nr_range, in save_mr() argument 193 mr[nr_range].start = start_pfn<<PAGE_SHIFT; in save_mr() 194 mr[nr_range].end = end_pfn<<PAGE_SHIFT; in save_mr() 195 mr[nr_range].page_size_mask = page_size_mask; in save_mr() 206 static void __init_refok adjust_range_page_size_mask(struct map_range *mr, in adjust_range_page_size_mask() argument 213 !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) { in adjust_range_page_size_mask() 214 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask() 215 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask() 223 mr[i].page_size_mask |= 1<<PG_LEVEL_2M; in adjust_range_page_size_mask() 226 !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) { in adjust_range_page_size_mask() [all …]
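save_mr() records a physical range by converting page-frame numbers back to byte addresses with PAGE_SHIFT and remembering the page-size mask the range should be mapped with. A standalone illustration of that bookkeeping; NR_RANGE, the bound check and the printed example are placeholders, not the kernel's exact limits.

#include <stdio.h>

#define PAGE_SHIFT 12
#define NR_RANGE   8                          /* placeholder for the kernel's range limit */

struct map_range {
	unsigned long start;                  /* byte address */
	unsigned long end;
	unsigned long page_size_mask;
};

static int save_range(struct map_range *mr, int nr_range,
		      unsigned long start_pfn, unsigned long end_pfn,
		      unsigned long page_size_mask)
{
	if (start_pfn >= end_pfn || nr_range >= NR_RANGE)
		return nr_range;
	mr[nr_range].start = start_pfn << PAGE_SHIFT;
	mr[nr_range].end = end_pfn << PAGE_SHIFT;
	mr[nr_range].page_size_mask = page_size_mask;
	return nr_range + 1;
}

int main(void)
{
	struct map_range mr[NR_RANGE];
	int n = save_range(mr, 0, 0x100, 0x200, 1UL << 1);

	printf("ranges=%d start=%#lx end=%#lx\n", n, mr[0].start, mr[0].end);
	return 0;
}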
|
/linux-4.1.27/drivers/rtc/ |
D | rtc-at91sam9.c | 141 u32 offset, alarm, mr; in at91_rtc_settime() local 152 mr = rtt_readl(rtc, MR); in at91_rtc_settime() 155 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); in at91_rtc_settime() 176 mr &= ~AT91_RTT_ALMIEN; in at91_rtc_settime() 182 rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST); in at91_rtc_settime() 219 u32 mr; in at91_rtc_setalarm() local 231 mr = rtt_readl(rtc, MR); in at91_rtc_setalarm() 232 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); in at91_rtc_setalarm() 243 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); in at91_rtc_setalarm() 255 u32 mr = rtt_readl(rtc, MR); in at91_rtc_alarm_irq_enable() local [all …]
|
/linux-4.1.27/drivers/sh/intc/ |
D | handle.c | 44 struct intc_mask_reg *mr = desc->hw.mask_regs; in _intc_mask_data() local 48 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { in _intc_mask_data() 49 mr = desc->hw.mask_regs + *reg_idx; in _intc_mask_data() 51 for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { in _intc_mask_data() 52 if (mr->enum_ids[*fld_idx] != enum_id) in _intc_mask_data() 55 if (mr->set_reg && mr->clr_reg) { in _intc_mask_data() 58 reg_e = mr->clr_reg; in _intc_mask_data() 59 reg_d = mr->set_reg; in _intc_mask_data() 62 if (mr->set_reg) { in _intc_mask_data() 64 reg_e = mr->set_reg; in _intc_mask_data() [all …]
|
D | balancing.c | 44 struct intc_mask_reg *mr = desc->hw.mask_regs; in intc_dist_data() local 48 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) { in intc_dist_data() 49 mr = desc->hw.mask_regs + i; in intc_dist_data() 55 if (!mr->dist_reg) in intc_dist_data() 58 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { in intc_dist_data() 59 if (mr->enum_ids[j] != enum_id) in intc_dist_data() 64 reg_e = mr->dist_reg; in intc_dist_data() 65 reg_d = mr->dist_reg; in intc_dist_data() 67 fn += (mr->reg_width >> 3) - 1; in intc_dist_data() 72 (mr->reg_width - 1) - j); in intc_dist_data()
|
/linux-4.1.27/net/ipv4/netfilter/ |
D | ipt_MASQUERADE.c | 34 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in masquerade_tg_check() local 36 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { in masquerade_tg_check() 40 if (mr->rangesize != 1) { in masquerade_tg_check() 41 pr_debug("bad rangesize %u\n", mr->rangesize); in masquerade_tg_check() 51 const struct nf_nat_ipv4_multi_range_compat *mr; in masquerade_tg() local 53 mr = par->targinfo; in masquerade_tg() 54 range.flags = mr->range[0].flags; in masquerade_tg() 55 range.min_proto = mr->range[0].min; in masquerade_tg() 56 range.max_proto = mr->range[0].max; in masquerade_tg()
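masquerade_tg() shows the compat path: the legacy nf_nat_ipv4_multi_range_compat carries exactly one range, and its flags and protocol bounds are copied into the newer range structure before NAT setup. A self-contained sketch of that conversion with deliberately reduced stand-in structs (the real netfilter types carry more members and use unions for the protocol values):

#include <string.h>

struct proto_minmax { unsigned short all; };   /* stand-in for union nf_conntrack_man_proto */

struct compat_range {
	unsigned int flags;
	struct proto_minmax min, max;
};

struct multi_range_compat {
	unsigned int rangesize;                /* must be 1 on this path */
	struct compat_range range[1];
};

struct nat_range {
	unsigned int flags;
	struct proto_minmax min_proto, max_proto;
};

/* The conversion masquerade_tg() performs before calling into the NAT core. */
static int convert_range(struct nat_range *out, const struct multi_range_compat *mr)
{
	if (mr->rangesize != 1)
		return -1;                     /* rejected by masquerade_tg_check() */
	memset(out, 0, sizeof(*out));
	out->flags = mr->range[0].flags;
	out->min_proto = mr->range[0].min;
	out->max_proto = mr->range[0].max;
	return 0;
}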
|
D | nft_redir_ipv4.c | 25 struct nf_nat_ipv4_multi_range_compat mr; in nft_redir_ipv4_eval() local 27 memset(&mr, 0, sizeof(mr)); in nft_redir_ipv4_eval() 29 mr.range[0].min.all = in nft_redir_ipv4_eval() 31 mr.range[0].max.all = in nft_redir_ipv4_eval() 33 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; in nft_redir_ipv4_eval() 36 mr.range[0].flags |= priv->flags; in nft_redir_ipv4_eval() 38 regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, in nft_redir_ipv4_eval()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | head_64.S | 127 mr r24,r3 129 mr r25,r4 151 mr r3,r24 157 mr r4,r25 206 mr r24,r3 217 mr r3,r24 233 mr r24,r3 234 mr r25,r4 245 mr r3,r24 246 mr r4,r25 [all …]
|
D | misc_64.S | 36 mr r1,r3 47 mr r1,r4 225 mr r6,r3 545 mr r1,r3 564 mr r31,r3 /* newstack (both) */ 565 mr r30,r4 /* start (real) */ 566 mr r29,r5 /* image (virt) */ 567 mr r28,r6 /* control, unused */ 568 mr r27,r7 /* clear_all() fn desc */ 569 mr r26,r8 /* spare */ [all …]
|
D | misc_32.S | 49 mr r1,r3 69 mr r1,r4 86 mr r10,r4 94 mr r10,r3 360 mr r6,r3 460 mr r6,r3 506 mr r6,r3 702 mr r3,r9 703 mr r4,r10 741 mr r29, r3 [all …]
|
D | head_fsl_booke.S | 69 mr r30,r3 70 mr r31,r4 81 mr r23,r3 82 mr r25,r4 250 mr r3,r30 251 mr r4,r31 253 mr r5,r23 254 mr r6,r25 256 mr r5,r25 275 mr r3,r30 [all …]
|
D | head_booke.h | 43 mr r11, r1; \ 63 mr r1, r11; \ 151 mr r11,r8; \ 164 mr r1,r11; \ 387 mr r4,r12; /* Pass SRR0 as arg2 */ \
|
D | entry_64.S | 55 mr r10,r1 246 mr r0,r3 374 mr r3,r15 376 mr r12,r14 529 mr r1,r8 /* start using new stack pointer */ 658 mr r30,r4 660 mr r4,r30 674 mr r4,r1 /* src: current exception frame */ 675 mr r1,r3 /* Reroute the trampoline frame to r1 */ 1248 mr r31, r1
|
D | head_32.S | 142 1: mr r31,r3 /* save device tree ptr */ 191 mr r26,r3 230 mr r24,r3 /* cpu # */ 409 mr r4,r12 /* SRR0 is fault address */ 411 1: mr r4,r12 412 mr r5,r9 776 4: mr r5,r25 811 mr r24, r3 /* cpu # */ 839 mr r4,r24 967 mr r4,r31
|
D | head_8xx.S | 90 mr r31,r3 /* save device tree ptr */ 444 mr r4,r12 445 mr r5,r9 653 mr r4,r31 729 mr r8, r10 767 mr r8, r9 /* Create vaddr for TLB */ 773 mr r8, r9 /* Create paddr for TLB */
|
D | entry_32.S | 349 mr r6,r3 434 mr r3,r15 452 mr r0,r3 571 mr r5,r3 644 mr r3,r2 707 mr r12,r4 /* restart at exc_exit_restart */ 782 mr r4,r1 /* src: current exception frame */ 783 mr r1,r3 /* Reroute the trampoline frame to r1 */ 1189 mr r4,r9 1212 mr r12,r11 /* restart at exc_exit_restart */ [all …]
|
D | exceptions-64e.S | 587 mr r14,r10 703 mr r9,r13 /* keep a copy of userland r13 */ 774 mr r4,r14 839 mr r4,r14 1013 mr r4,r14 1014 mr r5,r15 1022 mr r5,r3 1056 1: mr r0,r13 1241 1: mr r7,r3 /* Set MAS0(TLBSEL) */ 1268 mr r4,r3 /* Set MAS0(TLBSEL) = 1 */
|
D | head_40x.S | 61 mr r31,r3 /* save device tree ptr */ 395 mr r4,r12 /* Pass SRR0 as arg2 */ 520 mr r11, r12 620 mr r11, r12 850 mr r4,r31
|
D | misc.S | 115 mr r3,r4
|
D | exceptions-64s.S | 37 mr r9,r13 ; \ 479 mr r11,r1 /* Save r1 */ 1288 mr r10,r1 /* Save r1 */ 1612 mr r5,r3 1632 mr r5,r3 1646 mr r4,r3
|
D | head_44x.S | 64 mr r31,r3 /* save device tree ptr */ 205 mr r4,r31 896 mr r4,r25 1002 mr r24,r3 /* CPU number */
|
D | fsl_booke_entry_mapping.S | 210 mr r6, r25
|
/linux-4.1.27/net/mac80211/ |
D | rc80211_minstrel.c | 73 int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma) in minstrel_get_tp_avg() argument 77 usecs = mr->perfect_tx_time; in minstrel_get_tp_avg() 82 if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100)) in minstrel_get_tp_avg() 202 struct minstrel_rate *mr = &mi->r[i]; in minstrel_update_stats() local 213 mr->adjusted_retry_count = mrs->retry_count >> 1; in minstrel_update_stats() 214 if (mr->adjusted_retry_count > 2) in minstrel_update_stats() 215 mr->adjusted_retry_count = 2; in minstrel_update_stats() 216 mr->sample_limit = 4; in minstrel_update_stats() 218 mr->sample_limit = -1; in minstrel_update_stats() 219 mr->adjusted_retry_count = mrs->retry_count; in minstrel_update_stats() [all …]
|
D | rc80211_minstrel_debugfs.c | 96 struct minstrel_rate *mr = &mi->r[i]; in minstrel_stats_open() local 105 p += sprintf(p, " %3u%s ", mr->bitrate / 2, in minstrel_stats_open() 106 (mr->bitrate & 1 ? ".5" : " ")); in minstrel_stats_open() 108 p += sprintf(p, "%6u ", mr->perfect_tx_time); in minstrel_stats_open() 110 tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); in minstrel_stats_open() 111 tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); in minstrel_stats_open() 164 struct minstrel_rate *mr = &mi->r[i]; in minstrel_stats_csv_open() local 173 p += sprintf(p, ",%u%s", mr->bitrate / 2, in minstrel_stats_csv_open() 174 (mr->bitrate & 1 ? ".5," : ",")); in minstrel_stats_csv_open() 176 p += sprintf(p, "%u,",mr->perfect_tx_time); in minstrel_stats_csv_open() [all …]
|
D | rc80211_minstrel.h | 162 int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | mr.c | 421 int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc_reserved() argument 423 mr->iova = iova; in mlx4_mr_alloc_reserved() 424 mr->size = size; in mlx4_mr_alloc_reserved() 425 mr->pd = pd; in mlx4_mr_alloc_reserved() 426 mr->access = access; in mlx4_mr_alloc_reserved() 427 mr->enabled = MLX4_MPT_DISABLED; in mlx4_mr_alloc_reserved() 428 mr->key = hw_index_to_key(mridx); in mlx4_mr_alloc_reserved() 430 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved() 530 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument 540 access, npages, page_shift, mr); in mlx4_mr_alloc() [all …]
|
D | en_main.c | 218 (void) mlx4_mr_free(dev, &mdev->mr); in mlx4_en_remove() 261 0, 0, &mdev->mr)) { in mlx4_en_add() 265 if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { in mlx4_en_add() 318 (void) mlx4_mr_free(dev, &mdev->mr); in mlx4_en_add()
|
D | Makefile | 4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/ |
D | mr.c | 51 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, in mlx5_core_create_mkey() argument 85 mr->iova = be64_to_cpu(in->seg.start_addr); in mlx5_core_create_mkey() 86 mr->size = be64_to_cpu(in->seg.len); in mlx5_core_create_mkey() 87 mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; in mlx5_core_create_mkey() 88 mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; in mlx5_core_create_mkey() 91 be32_to_cpu(lout.mkey), key, mr->key); in mlx5_core_create_mkey() 95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); in mlx5_core_create_mkey() 99 mlx5_base_mkey(mr->key), err); in mlx5_core_create_mkey() 100 mlx5_core_destroy_mkey(dev, mr); in mlx5_core_create_mkey() 107 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) in mlx5_core_destroy_mkey() argument [all …]
|
D | Makefile | 4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_mr.c | 430 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) in mthca_mr_alloc() argument 444 mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); in mthca_mr_alloc() 463 if (!mr->mtt) in mthca_mr_alloc() 475 if (mr->mtt) in mthca_mr_alloc() 478 mr->mtt->first_seg * dev->limits.mtt_seg_size); in mthca_mr_alloc() 481 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); in mthca_mr_alloc() 513 u32 access, struct mthca_mr *mr) in mthca_mr_alloc_notrans() argument 515 mr->mtt = NULL; in mthca_mr_alloc_notrans() 516 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); in mthca_mr_alloc_notrans() 522 u32 access, struct mthca_mr *mr) in mthca_mr_alloc_phys() argument [all …]
|
D | mthca_provider.c | 465 srq->mr.ibmr.lkey = ucmd.lkey; in mthca_create_srq() 560 qp->mr.ibmr.lkey = ucmd.lkey; in mthca_create_qp() 681 cq->buf.mr.ibmr.lkey = ucmd.lkey; in mthca_create_cq() 790 lkey = cq->resize_buf->buf.mr.ibmr.lkey; in mthca_resize_cq() 872 struct mthca_mr *mr; in mthca_get_dma_mr() local 875 mr = kmalloc(sizeof *mr, GFP_KERNEL); in mthca_get_dma_mr() 876 if (!mr) in mthca_get_dma_mr() 881 convert_access(acc), mr); in mthca_get_dma_mr() 884 kfree(mr); in mthca_get_dma_mr() 888 mr->umem = NULL; in mthca_get_dma_mr() [all …]
|
D | mthca_provider.h | 115 struct mthca_mr mr; member 187 struct mthca_mr mr; member 240 struct mthca_mr mr; member 273 struct mthca_mr mr; member
|
D | mthca_allocator.c | 196 int hca_write, struct mthca_mr *mr) in mthca_buf_alloc() argument 265 mr); in mthca_buf_alloc() 283 int is_direct, struct mthca_mr *mr) in mthca_buf_free() argument 287 if (mr) in mthca_buf_free() 288 mthca_free_mr(dev, mr); in mthca_buf_free()
|
D | mthca_dev.h | 426 int hca_write, struct mthca_mr *mr); 428 int is_direct, struct mthca_mr *mr); 472 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr); 474 u32 access, struct mthca_mr *mr); 478 u32 access, struct mthca_mr *mr); 479 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
|
D | mthca_srq.c | 104 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context() 129 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_arbel_init_srq_context() 143 srq->is_direct, &srq->mr); in mthca_free_srq_buf() 164 &srq->queue, &srq->is_direct, pd, 1, &srq->mr); in mthca_alloc_srq_buf()
|
D | mthca_eq.c | 523 &eq->mr); in mthca_create_eq() 543 eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); in mthca_create_eq() 565 mthca_free_mr(dev, &eq->mr); in mthca_create_eq() 618 mthca_free_mr(dev, &eq->mr); in mthca_free_eq()
|
D | mthca_cq.c | 361 &dev->driver_pd, 1, &buf->mr); in mthca_alloc_cq_buf() 374 buf->is_direct, &buf->mr); in mthca_free_cq_buf() 842 cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey); in mthca_init_cq()
|
/linux-4.1.27/net/netfilter/ |
D | xt_NETMAP.c | 72 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in netmap_tg4() local 81 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); in netmap_tg4() 88 new_ip |= mr->range[0].min_ip & netmask; in netmap_tg4() 92 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; in netmap_tg4() 95 newrange.min_proto = mr->range[0].min; in netmap_tg4() 96 newrange.max_proto = mr->range[0].max; in netmap_tg4() 104 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in netmap_tg4_check() local 106 if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) { in netmap_tg4_check() 110 if (mr->rangesize != 1) { in netmap_tg4_check() 111 pr_debug("bad rangesize %u.\n", mr->rangesize); in netmap_tg4_check()
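netmap_tg4() derives the network mask from the configured range itself: every bit where min_ip and max_ip agree is a network bit, and the packet's host bits are spliced onto the mapped network. The arithmetic as a standalone example:

#include <stdint.h>
#include <stdio.h>

/* Map 'addr' into the block described by [min_ip, max_ip], NETMAP-style. */
static uint32_t netmap(uint32_t addr, uint32_t min_ip, uint32_t max_ip)
{
	uint32_t netmask = ~(min_ip ^ max_ip);   /* bits common to min and max */

	return (addr & ~netmask) | (min_ip & netmask);
}

int main(void)
{
	/* 10.1.2.3 mapped into 192.168.0.0/24 -> 192.168.0.3 */
	printf("%#010x\n", netmap(0x0a010203u, 0xc0a80000u, 0xc0a800ffu));
	return 0;
}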
|
D | xt_nat.c | 19 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in xt_nat_checkentry_v0() local 21 if (mr->rangesize != 1) { in xt_nat_checkentry_v0() 45 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in xt_snat_target_v0() local 55 xt_nat_convert_range(&range, &mr->range[0]); in xt_snat_target_v0() 62 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in xt_dnat_target_v0() local 71 xt_nat_convert_range(&range, &mr->range[0]); in xt_dnat_target_v0()
|
D | xt_REDIRECT.c | 49 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in redirect_tg4_check() local 51 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { in redirect_tg4_check() 55 if (mr->rangesize != 1) { in redirect_tg4_check() 56 pr_debug("bad rangesize %u.\n", mr->rangesize); in redirect_tg4_check()
|
D | nf_nat_redirect.c | 33 const struct nf_nat_ipv4_multi_range_compat *mr, in nf_nat_redirect_ipv4() argument 71 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; in nf_nat_redirect_ipv4() 74 newrange.min_proto = mr->range[0].min; in nf_nat_redirect_ipv4() 75 newrange.max_proto = mr->range[0].max; in nf_nat_redirect_ipv4()
|
/linux-4.1.27/drivers/infiniband/core/ |
D | verbs.c | 1061 struct ib_mr *mr; in ib_get_dma_mr() local 1068 mr = pd->device->get_dma_mr(pd, mr_access_flags); in ib_get_dma_mr() 1070 if (!IS_ERR(mr)) { in ib_get_dma_mr() 1071 mr->device = pd->device; in ib_get_dma_mr() 1072 mr->pd = pd; in ib_get_dma_mr() 1073 mr->uobject = NULL; in ib_get_dma_mr() 1075 atomic_set(&mr->usecnt, 0); in ib_get_dma_mr() 1078 return mr; in ib_get_dma_mr() 1088 struct ib_mr *mr; in ib_reg_phys_mr() local 1098 mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, in ib_reg_phys_mr() [all …]
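Most consumers in this listing (the mad layer, ipoib, rds, iser) obtain their catch-all DMA region with this call and the same error handling. A hedged kernel-style sketch of that consumer pattern as it looks in the 4.1 API; the wrapper function name is illustrative:

/* Sketch of the common consumer pattern around ib_get_dma_mr() in 4.1. */
static struct ib_mr *setup_dma_mr(struct ib_pd *pd)
{
	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);

	if (IS_ERR(mr))
		return mr;              /* caller checks with IS_ERR() */

	/* every sge posted later uses mr->lkey; ib_dereg_mr(mr) on teardown */
	return mr;
}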
|
D | uverbs_cmd.c | 945 struct ib_mr *mr; in ib_uverbs_reg_mr() local 990 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, in ib_uverbs_reg_mr() 992 if (IS_ERR(mr)) { in ib_uverbs_reg_mr() 993 ret = PTR_ERR(mr); in ib_uverbs_reg_mr() 997 mr->device = pd->device; in ib_uverbs_reg_mr() 998 mr->pd = pd; in ib_uverbs_reg_mr() 999 mr->uobject = uobj; in ib_uverbs_reg_mr() 1001 atomic_set(&mr->usecnt, 0); in ib_uverbs_reg_mr() 1003 uobj->object = mr; in ib_uverbs_reg_mr() 1009 resp.lkey = mr->lkey; in ib_uverbs_reg_mr() [all …]
|
D | mad_priv.h | 203 struct ib_mr *mr; member
|
D | mad.c | 341 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, in ib_register_mad_agent() 343 if (IS_ERR(mad_agent_priv->agent.mr)) { in ib_register_mad_agent() 432 ib_dereg_mr(mad_agent_priv->agent.mr); in ib_register_mad_agent() 593 ib_dereg_mr(mad_agent_priv->agent.mr); in unregister_mad_agent() 956 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey; in ib_create_send_mad() 958 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey; in ib_create_send_mad() 2653 sg_list.lkey = (*qp_info->port_priv->mr).lkey; in ib_mad_post_receive_mads() 2961 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); in ib_mad_port_open() 2962 if (IS_ERR(port_priv->mr)) { in ib_mad_port_open() 2964 ret = PTR_ERR(port_priv->mr); in ib_mad_port_open() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c | 713 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, in ocrdma_alloc_lkey() argument 718 mr->hwmr.fr_mr = 0; in ocrdma_alloc_lkey() 719 mr->hwmr.local_rd = 1; in ocrdma_alloc_lkey() 720 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; in ocrdma_alloc_lkey() 721 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; in ocrdma_alloc_lkey() 722 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; in ocrdma_alloc_lkey() 723 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; in ocrdma_alloc_lkey() 724 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; in ocrdma_alloc_lkey() 725 mr->hwmr.num_pbls = num_pbls; in ocrdma_alloc_lkey() 727 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check); in ocrdma_alloc_lkey() [all …]
|
/linux-4.1.27/drivers/watchdog/ |
D | at91sam9_wdt.c | 88 u32 mr; member 165 if ((tmp & mask) != (wdt->mr & mask)) { in at91_wdt_init() 167 wdt_write(wdt, AT91_WDT_MR, wdt->mr); in at91_wdt_init() 173 if (wdt->mr & AT91_WDT_WDDIS) in at91_wdt_init() 218 if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask)) in at91_wdt_init() 221 tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask); in at91_wdt_init() 294 wdt->mr = 0; in of_at91wdt_init() 297 wdt->mr |= AT91_WDT_WDFIEN; in of_at91wdt_init() 300 wdt->mr |= AT91_WDT_WDRSTEN; in of_at91wdt_init() 305 wdt->mr |= AT91_WDT_WDRPROC; in of_at91wdt_init() [all …]
|
/linux-4.1.27/drivers/isdn/hisax/ |
D | hfc4s8s_l1.c | 195 } mr; member 319 l1->hw->mr.r_irq_fifo_blx[l1->st_num] |= in dch_l2l1() 399 l1->hw->mr.r_irq_fifo_blx[l1->st_num] |= in bch_l2l1() 418 l1->hw->mr.timer_usg_cnt++; in bch_l2l1() 419 l1->hw->mr. in bch_l2l1() 448 l1->hw->mr.r_ctrl0 |= in bch_l2l1() 451 l1->hw->mr.r_ctrl0); in bch_l2l1() 465 l1->hw->mr. in bch_l2l1() 471 l1->hw->mr.timer_usg_cnt++; in bch_l2l1() 495 l1->hw->mr.r_ctrl0 |= in bch_l2l1() [all …]
|
/linux-4.1.27/arch/arm/mach-omap2/ |
D | sdram-hynix-h8mbx00u0mer-0em.h | 23 .mr = 0x00000032, 30 .mr = 0x00000032, 37 .mr = 0x00000022, 44 .mr = 0x00000022,
|
D | sdram-micron-mt46h32m32lf-6.h | 27 .mr = 0x00000032, 34 .mr = 0x00000032, 41 .mr = 0x00000032, 48 .mr = 0x00000032,
|
D | sdram-numonyx-m65kxxxxam.h | 23 .mr = 0x00000032, 30 .mr = 0x00000032, 37 .mr = 0x00000032, 44 .mr = 0x00000032,
|
D | sdram-qimonda-hyb18m512160af-6.h | 26 .mr = 0x00000032, 33 .mr = 0x00000032, 40 .mr = 0x00000022, 47 .mr = 0x00000022,
|
D | clkt34xx_dpll3m2.c | 98 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr); in omap3_core_dpll_m2_set_rate() 102 sdrc_cs1->actim_ctrlb, sdrc_cs1->mr); in omap3_core_dpll_m2_set_rate() 108 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, in omap3_core_dpll_m2_set_rate() 110 sdrc_cs1->actim_ctrlb, sdrc_cs1->mr); in omap3_core_dpll_m2_set_rate() 115 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, in omap3_core_dpll_m2_set_rate()
|
D | sdrc.h | 75 u32 mr; member
|
D | sdram-nokia.c | 277 nokia_sdrc_params[id].mr = 0x32; in sdrc_timings()
|
/linux-4.1.27/arch/powerpc/mm/ |
D | hash_low_64.S | 161 mr r4,r30 162 mr r5,r7 200 mr r4,r29 /* Retrieve vpn */ 225 mr r4,r29 /* Retrieve vpn */ 242 mr r5,r28 283 mr r4,r3 287 mr r5,r28 298 mr r5,r29 /* vpn */ 477 mr r4,r30 478 mr r5,r7 [all …]
|
D | slb_low.S | 149 mr r11,r10
|
/linux-4.1.27/sound/soc/fsl/ |
D | fsl_dma.c | 396 u32 mr; in fsl_dma_open() local 482 mr = in_be32(&dma_channel->mr) & in fsl_dma_open() 500 mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN | in fsl_dma_open() 505 mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? in fsl_dma_open() 508 out_be32(&dma_channel->mr, mr); in fsl_dma_open() 567 u32 mr; /* DMA Mode Register */ in fsl_dma_hw_params() local 582 mr = in_be32(&dma_channel->mr) & ~(CCSR_DMA_MR_BWC_MASK | in fsl_dma_hw_params() 592 mr |= CCSR_DMA_MR_DAHTS_1 | CCSR_DMA_MR_SAHTS_1; in fsl_dma_hw_params() 596 mr |= CCSR_DMA_MR_DAHTS_2 | CCSR_DMA_MR_SAHTS_2; in fsl_dma_hw_params() 600 mr |= CCSR_DMA_MR_DAHTS_4 | CCSR_DMA_MR_SAHTS_4; in fsl_dma_hw_params() [all …]
|
D | fsl_dma.h | 15 __be32 mr; /* Mode register */ member
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_mrmw.c | 440 int ehca_rereg_phys_mr(struct ib_mr *mr, in ehca_rereg_phys_mr() argument 451 container_of(mr->device, struct ehca_shca, ib_device); in ehca_rereg_phys_mr() 452 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); in ehca_rereg_phys_mr() 465 ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not " in ehca_rereg_phys_mr() 473 ehca_err(mr->device, "rereg with bad pd, pd=%p " in ehca_rereg_phys_mr() 490 ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p " in ehca_rereg_phys_mr() 492 mr, shca->maxmr, mr->lkey); in ehca_rereg_phys_mr() 498 ehca_err(mr->device, "not supported for FMR, mr=%p " in ehca_rereg_phys_mr() 499 "flags=%x", mr, e_mr->flags); in ehca_rereg_phys_mr() 504 ehca_err(mr->device, "bad input values mr_rereg_mask=%x" in ehca_rereg_phys_mr() [all …]
|
D | hcp_if.h | 198 const struct ehca_mr *mr, 207 const struct ehca_mr *mr, 215 const struct ehca_mr *mr, 220 const struct ehca_mr *mr); 224 const struct ehca_mr *mr, 234 const struct ehca_mr *mr,
|
D | hcp_if.c | 732 const struct ehca_mr *mr, in hipz_h_alloc_resource_mr() argument 758 const struct ehca_mr *mr, in hipz_h_register_rpage_mr() argument 784 adapter_handle.handle, mr, in hipz_h_register_rpage_mr() 785 mr->ipz_mr_handle.handle, pagesize, queue_type, in hipz_h_register_rpage_mr() 791 mr->ipz_mr_handle.handle, in hipz_h_register_rpage_mr() 797 const struct ehca_mr *mr, in hipz_h_query_mr() argument 805 mr->ipz_mr_handle.handle, /* r5 */ in hipz_h_query_mr() 817 const struct ehca_mr *mr) in hipz_h_free_resource_mr() argument 821 mr->ipz_mr_handle.handle, /* r5 */ in hipz_h_free_resource_mr() 826 const struct ehca_mr *mr, in hipz_h_reregister_pmr() argument [all …]
|
D | ehca_iverbs.h | 88 int ehca_rereg_phys_mr(struct ib_mr *mr, 94 int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); 96 int ehca_dereg_mr(struct ib_mr *mr);
|
D | ehca_mrmw.h | 125 void ehca_mr_deletenew(struct ehca_mr *mr);
|
/linux-4.1.27/drivers/tty/serial/ |
D | msm_serial.c | 361 unsigned int mr; in msm_set_mctrl() local 363 mr = msm_read(port, UART_MR1); in msm_set_mctrl() 366 mr &= ~UART_MR1_RX_RDY_CTL; in msm_set_mctrl() 367 msm_write(port, mr, UART_MR1); in msm_set_mctrl() 370 mr |= UART_MR1_RX_RDY_CTL; in msm_set_mctrl() 371 msm_write(port, mr, UART_MR1); in msm_set_mctrl() 523 unsigned int baud, mr; in msm_set_termios() local 534 mr = msm_read(port, UART_MR2); in msm_set_termios() 535 mr &= ~UART_MR2_PARITY_MODE; in msm_set_termios() 538 mr |= UART_MR2_PARITY_MODE_ODD; in msm_set_termios() [all …]
|
D | atmel_serial.c | 2376 unsigned int mr, quot; in atmel_console_get_options() local 2386 mr = UART_GET_MR(port) & ATMEL_US_CHRL; in atmel_console_get_options() 2387 if (mr == ATMEL_US_CHRL_8) in atmel_console_get_options() 2392 mr = UART_GET_MR(port) & ATMEL_US_PAR; in atmel_console_get_options() 2393 if (mr == ATMEL_US_PAR_EVEN) in atmel_console_get_options() 2395 else if (mr == ATMEL_US_PAR_ODD) in atmel_console_get_options()
|
/linux-4.1.27/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.c | 598 struct usnic_ib_mr *mr; in usnic_ib_reg_mr() local 604 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in usnic_ib_reg_mr() 605 if (IS_ERR_OR_NULL(mr)) in usnic_ib_reg_mr() 606 return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM); in usnic_ib_reg_mr() 608 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, in usnic_ib_reg_mr() 610 if (IS_ERR_OR_NULL(mr->umem)) { in usnic_ib_reg_mr() 611 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; in usnic_ib_reg_mr() 615 mr->ibmr.lkey = mr->ibmr.rkey = 0; in usnic_ib_reg_mr() 616 return &mr->ibmr; in usnic_ib_reg_mr() 619 kfree(mr); in usnic_ib_reg_mr() [all …]
|
/linux-4.1.27/arch/powerpc/lib/ |
D | string_64.S | 66 mr r3,r8 77 mr r3,r4 85 mr r8,r3 133 6: mr r8,r3 181 mr r8,r3 192 mr r8,r3
|
D | ldstfp.S | 100 mr r3,r9 127 mr r3,r9 154 mr r3,r9 181 mr r3,r9 248 mr r3,r9 276 mr r3,r9 344 mr r3,r9 372 mr r3,r9
|
D | div64.S | 30 1: mr r11,r5 # here dividend.hi != 0 58 mr r3,r6 # return the remainder in r3
|
D | copyuser_64.S | 72 mr r9,r7 73 mr r8,r6 325 mr r4,r3 326 mr r3,r5 /* return the number of bytes not copied */
|
D | mem_64.S | 23 mr r6,r3
|
D | string.S | 152 90: mr r3,r4
|
D | memcmp_64.S | 81 mr r3,rC
|
D | memcpy_64.S | 67 mr r8,r9
|
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c | 815 struct ehea_mr *mr) in ehea_reg_mr_section() argument 830 hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, in ehea_reg_mr_section() 835 ehea_h_free_resource(adapter->handle, mr->handle, in ehea_reg_mr_section() 846 struct ehea_mr *mr) in ehea_reg_mr_sections() argument 855 hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); in ehea_reg_mr_sections() 864 struct ehea_mr *mr) in ehea_reg_mr_dir_sections() argument 873 hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); in ehea_reg_mr_dir_sections() 880 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) in ehea_reg_kernel_mr() argument 898 &mr->handle, &mr->lkey); in ehea_reg_kernel_mr() 907 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); in ehea_reg_kernel_mr() [all …]
|
D | ehea_qmr.h | 388 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr); 393 int ehea_rem_mr(struct ehea_mr *mr);
|
D | ehea_phyp.c | 449 struct ehea_mr *mr) in ehea_h_register_smr() argument 463 mr->handle = outs[0]; in ehea_h_register_smr() 464 mr->lkey = (u32)outs[2]; in ehea_h_register_smr()
|
D | ehea.h | 398 struct ehea_mr mr; member
|
D | ehea_main.c | 227 if (adapter->mr.handle) { in ehea_update_firmware_handles() 229 arr[i++].fwh = adapter->mr.handle; in ehea_update_firmware_handles() 1423 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs() 1427 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs() 2345 ehea_rem_mr(&adapter->mr); in ehea_remove_adapter_mr() 2353 return ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_add_adapter_mr() 2782 ret = ehea_rem_mr(&adapter->mr); in ehea_rereg_mrs() 2794 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_rereg_mrs()
|
D | ehea_phyp.h | 416 struct ehea_mr *mr);
|
/linux-4.1.27/arch/powerpc/kernel/vdso32/ |
D | gettimeofday.S | 40 mr r10,r3 /* r10 saves tv */ 41 mr r11,r4 /* r11 saves tz */ 43 mr r9, r3 /* datapage ptr in r9 */ 82 mr r11,r4 /* r11 saves tp */ 84 mr r9,r3 /* datapage ptr in r9 */ 194 mr r11,r3 /* r11 holds t */ 196 mr r9, r3 /* datapage ptr in r9 */
|
D | cacheflush.S | 31 mr r11,r3 34 mr r10,r3
|
D | datapage.S | 56 mr r4,r3
|
/linux-4.1.27/drivers/video/fbdev/omap2/omapfb/ |
D | omapfb-ioctl.c | 487 struct omapfb_memory_read *mr) in omapfb_memory_read() argument 496 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) in omapfb_memory_read() 499 if (mr->w * mr->h * 3 > mr->buffer_size) in omapfb_memory_read() 502 buf = vmalloc(mr->buffer_size); in omapfb_memory_read() 508 r = display->driver->memory_read(display, buf, mr->buffer_size, in omapfb_memory_read() 509 mr->x, mr->y, mr->w, mr->h); in omapfb_memory_read() 512 if (copy_to_user(mr->buffer, buf, mr->buffer_size)) in omapfb_memory_read()
|
/linux-4.1.27/drivers/scsi/qla2xxx/ |
D | qla_mr.c | 702 snprintf(str, size, "%s", ha->mr.fw_version); in qlafx00_fw_version_str() 1252 rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport, in qlafx00_configure_all_targets() 1357 ha->mr.fw_hbt_en = 0; in qlafx00_abort_isp_cleanup() 1470 &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO)) in qlafx00_rescan_isp() 1488 if (ha->mr.fw_hbt_cnt) in qlafx00_timer_routine() 1489 ha->mr.fw_hbt_cnt--; in qlafx00_timer_routine() 1494 (ha->mr.fw_hbt_en)) { in qlafx00_timer_routine() 1496 if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { in qlafx00_timer_routine() 1497 ha->mr.old_fw_hbt_cnt = fw_heart_beat; in qlafx00_timer_routine() 1498 ha->mr.fw_hbt_miss_cnt = 0; in qlafx00_timer_routine() [all …]
|
D | qla_os.c | 2501 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; in qla2x00_probe_one() 2502 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; in qla2x00_probe_one() 2503 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; in qla2x00_probe_one() 2504 ha->mr.fw_hbt_en = 1; in qla2x00_probe_one() 2505 ha->mr.host_info_resend = false; in qla2x00_probe_one() 2506 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; in qla2x00_probe_one() 2595 ha->mr.fcport.vha = base_vha; in qla2x00_probe_one() 2596 ha->mr.fcport.port_type = FCT_UNKNOWN; in qla2x00_probe_one() 2597 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; in qla2x00_probe_one() 2598 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); in qla2x00_probe_one() [all …]
|
/linux-4.1.27/arch/powerpc/kernel/vdso64/ |
D | gettimeofday.S | 31 mr r11,r3 /* r11 holds tv */ 32 mr r10,r4 /* r10 holds tz */ 71 mr r11,r4 /* r11 saves tp */ 178 mr r11,r3 /* r11 holds t */ 188 mr r3,r4
|
D | cacheflush.S | 31 mr r11,r3 34 mr r10,r3
|
D | datapage.S | 56 mr r4,r3
|
/linux-4.1.27/arch/x86/include/asm/uv/ |
D | uv_bau.h | 683 static inline void write_mmr_sw_ack(unsigned long mr) in write_mmr_sw_ack() argument 685 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); in write_mmr_sw_ack() 688 static inline void write_gmmr_sw_ack(int pnode, unsigned long mr) in write_gmmr_sw_ack() argument 690 write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); in write_gmmr_sw_ack() 703 static inline void write_mmr_data_config(int pnode, unsigned long mr) in write_mmr_data_config() argument 705 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr); in write_mmr_data_config()
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c | 154 priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); in ipoib_transport_dev_init() 155 if (IS_ERR(priv->mr)) { in ipoib_transport_dev_init() 224 priv->tx_sge[i].lkey = priv->mr->lkey; in ipoib_transport_dev_init() 230 priv->rx_sge[0].lkey = priv->mr->lkey; in ipoib_transport_dev_init() 254 ib_dereg_mr(priv->mr); in ipoib_transport_dev_init() 288 if (ib_dereg_mr(priv->mr)) in ipoib_transport_dev_cleanup()
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
D | c2_mm.c | 177 struct c2_mr *mr) in c2_nsmr_register_phys_kern() argument 230 wr->pd_id = mr->pd->pd_id; in c2_nsmr_register_phys_kern() 280 mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); in c2_nsmr_register_phys_kern() 294 cpu_to_be32(mr->ibmr.lkey), in c2_nsmr_register_phys_kern()
|
D | c2_provider.c | 336 struct c2_mr *mr; in c2_reg_phys_mr() local 388 mr = kmalloc(sizeof(*mr), GFP_KERNEL); in c2_reg_phys_mr() 389 if (!mr) { in c2_reg_phys_mr() 394 mr->pd = to_c2pd(ib_pd); in c2_reg_phys_mr() 395 mr->umem = NULL; in c2_reg_phys_mr() 405 c2_convert_access(acc), mr); in c2_reg_phys_mr() 408 kfree(mr); in c2_reg_phys_mr() 412 return &mr->ibmr; in c2_reg_phys_mr() 496 struct c2_mr *mr = to_c2mr(ib_mr); in c2_dereg_mr() local 505 if (mr->umem) in c2_dereg_mr() [all …]
|
D | c2.h | 534 struct c2_mr *mr);
|
/linux-4.1.27/arch/powerpc/crypto/ |
D | aes-spe-modes.S | 91 mr rKS,rKP; \ 231 mr rKP,rKS 267 mr rKP,rKS 306 mr rKP,rKS 368 mr rKP,rKS 393 mr rKP,rKS 430 mr rKP,rKS 460 mr rKP,rKS 513 mr rKP,rKT 525 mr rKP,rKS [all …]
|
D | aes-spe-keys.S | 93 mr r14,r8 /* apply LS_BOX to 4th temp */ 139 mr r14,r10 /* apply LS_BOX to 6th temp */ 193 mr r14,r12 /* apply LS_BOX to 8th temp */ 201 mr r14,r8
|
D | sha1-powerpc-asm.S | 169 mr RE(0),r20
|
/linux-4.1.27/drivers/infiniband/ulp/iser/ |
D | iser_memory.c | 396 reg->sge.lkey = device->mr->lkey; in iser_reg_dma() 397 reg->rkey = device->mr->rkey; in iser_reg_dma() 636 iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) in iser_inv_rkey() argument 643 inv_wr->ex.invalidate_rkey = mr->rkey; in iser_inv_rkey() 645 rkey = ib_inc_rkey(mr->rkey); in iser_inv_rkey() 646 ib_update_fast_reg_key(mr, rkey); in iser_inv_rkey() 720 struct ib_mr *mr; in iser_fast_reg_mr() local 731 mr = desc->data_mr; in iser_fast_reg_mr() 734 mr = desc->pi_ctx->prot_mr; in iser_fast_reg_mr() 746 iser_inv_rkey(&inv_wr, mr); in iser_fast_reg_mr() [all …]
|
D | iser_verbs.c | 146 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | in iser_create_device_ib_res() 149 if (IS_ERR(device->mr)) in iser_create_device_ib_res() 160 ib_dereg_mr(device->mr); in iser_create_device_ib_res() 186 BUG_ON(device->mr == NULL); in iser_free_device_ib_res() 197 (void)ib_dereg_mr(device->mr); in iser_free_device_ib_res() 203 device->mr = NULL; in iser_free_device_ib_res() 1012 sge.lkey = ib_conn->device->mr->lkey; in iser_post_recvl()
|
D | iser_initiator.c | 176 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { in iser_create_send_desc() 177 tx_desc->tx_sg[0].lkey = device->mr->lkey; in iser_create_send_desc() 294 rx_sg->lkey = device->mr->lkey; in iser_alloc_rx_descriptors() 548 tx_dsg->lkey = device->mr->lkey; in iser_send_control()
|
D | iscsi_iser.h | 351 struct ib_mr *mr; member
|
/linux-4.1.27/drivers/net/ethernet/ibm/emac/ |
D | tah.c | 55 out_be32(&p->mr, TAH_MR_SR); in tah_reset() 57 while ((in_be32(&p->mr) & TAH_MR_SR) && n) in tah_reset() 65 out_be32(&p->mr, in tah_reset()
|
D | tah.h | 29 u32 mr; member
|
/linux-4.1.27/net/sunrpc/xprtrdma/ |
D | frwr_ops.c | 186 struct ib_mr *mr = frmr->fr_mr; in frwr_op_map() local 232 key = (u8)(mr->rkey & 0x000000FF); in frwr_op_map() 233 ib_update_fast_reg_key(mr, ++key); in frwr_op_map() 234 fastreg_wr.wr.fast_reg.rkey = mr->rkey; in frwr_op_map() 241 seg1->mr_rkey = mr->rkey; in frwr_op_map() 249 ib_update_fast_reg_key(mr, --key); in frwr_op_map()
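frwr_op_map() bumps only the low byte of the rkey before each fast-register WR, so a stale handle from an earlier registration of the same MR no longer matches; ib_update_fast_reg_key() then installs the new byte. The arithmetic as a standalone helper, assuming (as the code above does) that the low 8 bits are the consumer-owned key portion:

#include <stdint.h>

/* Advance the consumer-owned low byte of an R_Key, leaving the index bits alone. */
static uint32_t bump_rkey(uint32_t rkey)
{
	uint8_t key = (uint8_t)(rkey & 0xff);

	return (rkey & ~0xffu) | (uint8_t)(key + 1);
}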
|
D | svc_rdma_transport.c | 746 struct ib_mr *mr; in rdma_alloc_frmr() local 754 mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); in rdma_alloc_frmr() 755 if (IS_ERR(mr)) in rdma_alloc_frmr() 763 frmr->mr = mr; in rdma_alloc_frmr() 769 ib_dereg_mr(mr); in rdma_alloc_frmr() 784 ib_dereg_mr(frmr->mr); in rdma_dealloc_frmr_q() 815 if (ib_dma_mapping_error(frmr->mr->device, addr)) in frmr_unmap_dma() 818 ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE, in frmr_unmap_dma() 1242 key = (u8)(frmr->mr->lkey & 0x000000FF); in svc_rdma_fastreg() 1243 ib_update_fast_reg_key(frmr->mr, ++key); in svc_rdma_fastreg() [all …]
|
D | svc_rdma_recvfrom.c | 293 key = (u8)(frmr->mr->lkey & 0x000000FF); in rdma_read_chunk_frmr() 294 ib_update_fast_reg_key(frmr->mr, ++key); in rdma_read_chunk_frmr() 297 ctxt->sge[0].lkey = frmr->mr->lkey; in rdma_read_chunk_frmr() 312 fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; in rdma_read_chunk_frmr() 325 read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; in rdma_read_chunk_frmr() 334 inv_wr.ex.invalidate_rkey = frmr->mr->lkey; in rdma_read_chunk_frmr()
|
D | verbs.c | 1452 struct ib_mr *mr; in rpcrdma_register_internal() local 1477 mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1, in rpcrdma_register_internal() 1485 if (IS_ERR(mr)) { in rpcrdma_register_internal() 1487 rc = PTR_ERR(mr); in rpcrdma_register_internal() 1490 *mrp = mr; in rpcrdma_register_internal() 1491 iov->lkey = mr->lkey; in rpcrdma_register_internal() 1500 struct ib_mr *mr, struct ib_sge *iov) in rpcrdma_deregister_internal() argument 1507 if (NULL == mr) in rpcrdma_deregister_internal() 1510 rc = ib_dereg_mr(mr); in rpcrdma_deregister_internal()
|
/linux-4.1.27/arch/x86/kernel/ |
D | tboot.c | 178 struct tboot_mac_region *mr; in add_mac_region() local 185 mr = &tboot->mac_regions[tboot->num_mac_regions++]; in add_mac_region() 186 mr->start = round_down(start, PAGE_SIZE); in add_mac_region() 187 mr->size = round_up(end, PAGE_SIZE) - mr->start; in add_mac_region()
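add_mac_region() stores each region page-aligned: the start is rounded down and the end rounded up to PAGE_SIZE before the size is computed. The same rounding as a standalone example:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ROUND_DOWN(x, a) ((x) & ~((a) - 1))
#define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

struct mac_region { unsigned long start, size; };

static void set_region(struct mac_region *mr, unsigned long start, unsigned long end)
{
	mr->start = ROUND_DOWN(start, PAGE_SIZE);
	mr->size = ROUND_UP(end, PAGE_SIZE) - mr->start;
}

int main(void)
{
	struct mac_region mr;

	set_region(&mr, 0x1234, 0x5678);
	printf("start=%#lx size=%#lx\n", mr.start, mr.size);   /* 0x1000, 0x5000 */
	return 0;
}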
|
/linux-4.1.27/include/rdma/ |
D | ib_verbs.h | 1022 struct ib_mr *mr; member 1592 int (*rereg_user_mr)(struct ib_mr *mr, 1599 int (*query_mr)(struct ib_mr *mr, 1601 int (*dereg_mr)(struct ib_mr *mr); 1602 int (*destroy_mr)(struct ib_mr *mr); 1610 int (*rereg_phys_mr)(struct ib_mr *mr, 1653 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2428 int ib_rereg_phys_mr(struct ib_mr *mr, 2441 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); 2450 int ib_dereg_mr(struct ib_mr *mr); [all …]
|
D | ib_mad.h | 369 struct ib_mr *mr; member
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | book3s_hv_rmhandlers.S | 267 13: mr r3, r12 614 mr r31, r4 619 mr r4, r31 911 mr r10,r0 913 mr r9, r4 991 mr r9, r4 1085 mr r4, r9 1119 mr r4,r9 1149 mr r4, r9 1170 mr r4, r9 [all …]
|
D | mpic.c | 1289 static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr) in add_mmio_region() argument 1296 opp->mmio_regions[opp->num_mmio_regions++] = mr; in add_mmio_region() 1345 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_read_internal() local 1347 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) in kvm_mpic_read_internal() 1350 return mr->read(opp, addr - mr->start_addr, ptr); in kvm_mpic_read_internal() 1361 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_write_internal() local 1363 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) in kvm_mpic_write_internal() 1366 return mr->write(opp, addr - mr->start_addr, val); in kvm_mpic_write_internal()
|
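kvm_mpic_read_internal() and kvm_mpic_write_internal() above dispatch an MMIO access by walking the registered regions and handing the access, converted to a region-relative offset, to the first region whose [start_addr, start_addr + size) window contains the address. A user-space sketch of that dispatch loop; the struct layout, the timer_read() handler and the addresses are simplified stand-ins for the kernel's struct mem_reg.

#include <stdint.h>
#include <stdio.h>

struct mem_reg {
	uint64_t start_addr;
	uint64_t size;
	int (*read)(uint64_t offset, uint32_t *val);
};

static int timer_read(uint64_t offset, uint32_t *val)
{
	*val = 0x1000 + (uint32_t)offset;	/* dummy register contents */
	return 0;
}

static const struct mem_reg regions[] = {
	{ .start_addr = 0x40000, .size = 0x1000, .read = timer_read },
};

static int mmio_read(uint64_t addr, uint32_t *val)
{
	for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		const struct mem_reg *mr = &regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;	/* not this region */
		return mr->read(addr - mr->start_addr, val);
	}
	return -1;	/* no region claims this address */
}

int main(void)
{
	uint32_t val;

	if (mmio_read(0x40010, &val) == 0)
		printf("read 0x%x\n", val);	/* prints: read 0x1010 */
	return 0;
}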
D | bookehv_interrupts.S | 179 mr r11, r4 309 mr r4, r11 337 mr r4, r11 445 mr r5, r14 /* intno */ 446 mr r14, r4 /* Save vcpu pointer. */ 450 mr r4, r14
|
D | book3s_segment.S | 25 mr reg, r13
|
D | booke_interrupts.S | 253 mr r14, r4 /* Save vcpu pointer. */ 258 mr r4, r14
|
/linux-4.1.27/drivers/spi/ |
D | spi-atmel.c | 302 u32 mr; in cs_activate() local 323 mr = spi_readl(as, MR); in cs_activate() 338 mr = spi_readl(as, MR); in cs_activate() 339 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); in cs_activate() 342 spi_writel(as, MR, mr); in cs_activate() 347 mr); in cs_activate() 354 u32 mr; in cs_deactivate() local 359 mr = spi_readl(as, MR); in cs_deactivate() 360 if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) { in cs_deactivate() 361 mr = SPI_BFINS(PCS, 0xf, mr); in cs_deactivate() [all …]
|
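The cs_activate()/cs_deactivate() lines above are a read-modify-write of the SPI mode register: read MR, rewrite its peripheral-chip-select (PCS) field with SPI_BFINS(), and write it back, with an all-ones (active-low) field meaning no device selected. A sketch of that field insertion; the field offset, width and register values are assumptions for illustration, not taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

#define PCS_OFFSET	16			/* assumed field position */
#define PCS_MASK	(0xfu << PCS_OFFSET)

/* stand-in for SPI_BFINS(PCS, field, reg) */
static uint32_t bfins_pcs(uint32_t reg, uint32_t field)
{
	return (reg & ~PCS_MASK) | ((field << PCS_OFFSET) & PCS_MASK);
}

int main(void)
{
	uint32_t mr = 0x000f0001;		/* pretend mode register, nothing selected */
	unsigned int chip_select = 1;

	/* activate: clear the device's (active-low) bit in PCS */
	mr = bfins_pcs(mr, ~(1u << chip_select));
	printf("after activate:   0x%08x\n", mr);	/* 0x000d0001 */

	/* deactivate: set the whole field back to all ones */
	mr = bfins_pcs(mr, 0xf);
	printf("after deactivate: 0x%08x\n", mr);	/* 0x000f0001 */
	return 0;
}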
/linux-4.1.27/arch/powerpc/net/ |
D | bpf_jit_asm.S | 112 mr r4, r_addr; \ 164 mr r4, r_addr; \ 176 mr r_addr, r3; \
|
/linux-4.1.27/arch/m68k/ifpsp060/src/ |
D | ilsp.S | 546 mov.l %d0,%d2 # mr in d2 547 mov.l %d0,%d3 # mr in d3 549 swap %d3 # hi(mr) in lo d3 553 mulu.w %d1,%d0 # [1] lo(mr) * lo(md) 554 mulu.w %d3,%d1 # [2] hi(mr) * lo(md) 555 mulu.w %d4,%d2 # [3] lo(mr) * hi(md) 556 mulu.w %d4,%d3 # [4] hi(mr) * hi(md) 672 mov.l %d0,%d2 # mr in d2 673 mov.l %d0,%d3 # mr in d3 675 swap %d3 # hi(mr) in lo d3 [all …]
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
D | ib_srp.h | 99 struct ib_mr *mr; member 244 struct ib_mr *mr; member
|
D | ib_srp.c | 346 if (d->mr) in srp_destroy_fr_pool() 347 ib_dereg_mr(d->mr); in srp_destroy_fr_pool() 365 struct ib_mr *mr; in srp_create_fr_pool() local 382 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len); in srp_create_fr_pool() 383 if (IS_ERR(mr)) { in srp_create_fr_pool() 384 ret = PTR_ERR(mr); in srp_create_fr_pool() 387 d->mr = mr; in srp_create_fr_pool() 1071 res = srp_inv_rkey(ch, (*pfr)->mr->rkey); in srp_unmap_data() 1075 (*pfr)->mr->rkey, res); in srp_unmap_data() 1306 rkey = ib_inc_rkey(desc->mr->rkey); in srp_map_finish_fr() [all …]
|
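srp_create_fr_pool() and rdma_alloc_frmr() above both use the kernel's error-pointer idiom: the allocator returns either a valid struct ib_mr * or an errno encoded in the pointer, told apart with IS_ERR()/PTR_ERR(). A user-space model of that encoding; the real definitions live in linux/err.h, and alloc_mr() here is a made-up stand-in for ib_alloc_fast_reg_mr().

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

static void *alloc_mr(void)
{
	return ERR_PTR(-ENOMEM);	/* pretend the allocation failed */
}

int main(void)
{
	void *mr = alloc_mr();

	if (IS_ERR(mr)) {
		printf("allocation failed: %ld\n", PTR_ERR(mr));	/* -12 on Linux */
		return 1;
	}
	return 0;
}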
/linux-4.1.27/include/net/netfilter/ |
D | nf_nat_redirect.h | 6 const struct nf_nat_ipv4_multi_range_compat *mr,
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd.c | 1313 struct ib_mr *mr; in kiblnd_find_rd_dma_mr() local 1325 for (i = 0, mr = prev_mr = NULL; in kiblnd_find_rd_dma_mr() 1327 mr = kiblnd_find_dma_mr(hdev, in kiblnd_find_rd_dma_mr() 1331 prev_mr = mr; in kiblnd_find_rd_dma_mr() 1333 if (mr == NULL || prev_mr != mr) { in kiblnd_find_rd_dma_mr() 1335 mr = NULL; in kiblnd_find_rd_dma_mr() 1340 return mr; in kiblnd_find_rd_dma_mr() 1793 struct ib_mr *mr = pmr->pmr_mr; in kiblnd_pmr_pool_unmap() local 1797 if (mr != NULL) in kiblnd_pmr_pool_unmap() 1798 ib_dereg_mr(mr); in kiblnd_pmr_pool_unmap() [all …]
|
D | o2iblnd_cb.c | 151 struct ib_mr *mr; in kiblnd_post_rx() local 160 mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); in kiblnd_post_rx() 161 LASSERT(mr != NULL); in kiblnd_post_rx() 163 rx->rx_sge.lkey = mr->lkey; in kiblnd_post_rx() 660 struct ib_mr *mr = NULL; in kiblnd_map_tx() local 682 mr = kiblnd_find_rd_dma_mr(hdev, rd); in kiblnd_map_tx() 683 if (mr != NULL) { in kiblnd_map_tx() 685 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; in kiblnd_map_tx() 1058 struct ib_mr *mr; in kiblnd_init_tx_msg() local 1066 mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); in kiblnd_init_tx_msg() [all …]
|
/linux-4.1.27/drivers/ipack/devices/ |
D | scc2698.h | 26 u8 d0, mr; /* Mode register 1/2*/ member 33 u8 d0, mr; /* Mode register 1/2 */ member
|
D | ipoctal.c | 335 &channel->regs->w.mr); /* mr1 */ in ipoctal_inst_slot() 336 iowrite8(0, &channel->regs->w.mr); /* mr2 */ in ipoctal_inst_slot() 606 iowrite8(mr1, &channel->regs->w.mr); in ipoctal_set_termios() 607 iowrite8(mr2, &channel->regs->w.mr); in ipoctal_set_termios()
|
/linux-4.1.27/tools/testing/selftests/powerpc/switch_endian/ |
D | check.S | 12 mr r9,r15 96 1: mr r3, r9
|
D | switch_endian_test.S | 24 mr r3, r15
|
/linux-4.1.27/drivers/infiniband/ulp/isert/ |
D | ib_isert.c | 236 rx_sg->lkey = device->mr->lkey; in isert_alloc_rx_descriptors() 383 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE); in isert_create_device_ib_res() 384 if (IS_ERR(device->mr)) { in isert_create_device_ib_res() 385 ret = PTR_ERR(device->mr); in isert_create_device_ib_res() 409 ib_dereg_mr(device->mr); in isert_free_device_ib_res() 1082 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { in isert_create_send_desc() 1083 tx_desc->tx_sg[0].lkey = device->mr->lkey; in isert_create_send_desc() 1106 tx_desc->tx_sg[0].lkey = device->mr->lkey; in isert_init_tx_hdrs() 1139 sge.lkey = isert_conn->device->mr->lkey; in isert_rdma_post_recvl() 1189 tx_dsg->lkey = isert_conn->device->mr->lkey; in isert_put_login_tx() [all …]
|
D | ib_isert.h | 213 struct ib_mr *mr; member
|
/linux-4.1.27/arch/s390/kernel/vdso32/ |
D | clock_gettime.S | 48 mr %r0,%r0 111 mr %r0,%r0
|
D | gettimeofday.S | 43 mr %r0,%r0
|
/linux-4.1.27/drivers/dma/ |
D | fsldma.h | 109 u32 mr; /* 0x00 - Mode Register */ member 143 u32 mr; member
|
D | fsldma.c | 66 DMA_OUT(chan, &chan->regs->mr, val, 32); in set_mr() 71 return DMA_IN(chan, &chan->regs->mr, 32); in get_mr() 1462 chan->regs_save.mr = get_mr(chan); in fsldma_suspend_late() 1493 mode = chan->regs_save.mr in fsldma_resume_early()
|
/linux-4.1.27/arch/powerpc/platforms/52xx/ |
D | lite5200_sleep.S | 44 mr r7, r3 /* save SRAM va */ 45 mr r8, r4 /* save MBAR va */ 76 mr r4, r7
|
/linux-4.1.27/arch/powerpc/boot/ |
D | div64.S | 30 1: mr r11,r5 # here dividend.hi != 0 58 mr r3,r6 # return the remainder in r3
|
D | ps3-hvcall.S | 140 mr r4, r3
|
D | crtsavres.S | 231 mr 1,11
|
/linux-4.1.27/include/linux/mlx5/ |
D | driver.h | 688 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 692 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); 693 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 695 int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
|
/linux-4.1.27/drivers/media/usb/usbvision/ |
D | usbvision.h | 192 #define YUV_TO_RGB_BY_THE_BOOK(my, mu, mv, mr, mg, mb) { \ argument 203 mr = LIMIT_RGB(mm_r); \
|
/linux-4.1.27/tools/testing/selftests/powerpc/copyloops/ |
D | copyuser_64.S | 72 mr r9,r7 73 mr r8,r6 325 mr r4,r3 326 mr r3,r5 /* return the number of bytes not copied */
|
D | memcpy_64.S | 67 mr r8,r9
|
/linux-4.1.27/include/linux/mlx4/ |
D | device.h | 665 struct mlx4_mr mr; member 1040 int npages, int page_shift, struct mlx4_mr *mr); 1041 int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); 1042 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); 1437 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr); 1438 int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
D | fb.h | 146 u32 mr[16]; member
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
D | opal-wrappers.S | 114 mr r3,r0 150 mr r4,r3
|
/linux-4.1.27/drivers/message/fusion/ |
D | mptscsih.c | 588 mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) in mptscsih_io_done() argument 600 req_idx_MR = (mr != NULL) ? in mptscsih_io_done() 601 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; in mptscsih_io_done() 645 pScsiReply = (SCSIIOReply_t *) mr; in mptscsih_io_done() 650 ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag)); in mptscsih_io_done() 654 ioc->name, mf, mr, sc, req_idx)); in mptscsih_io_done() 2060 MPT_FRAME_HDR *mr) in mptscsih_taskmgmt_complete() argument 2063 "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr)); in mptscsih_taskmgmt_complete() 2067 if (!mr) in mptscsih_taskmgmt_complete() 2071 memcpy(ioc->taskmgmt_cmds.reply, mr, in mptscsih_taskmgmt_complete() [all …]
|
D | mptbase.c | 450 MPT_FRAME_HDR *mr = NULL; in mpt_turbo_reply() local 482 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa); in mpt_turbo_reply() 486 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa); in mpt_turbo_reply() 501 if (MptCallbacks[cb_idx](ioc, mf, mr)) in mpt_turbo_reply() 511 MPT_FRAME_HDR *mr; in mpt_reply() local 530 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames + in mpt_reply() 533 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx); in mpt_reply() 534 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx; in mpt_reply() 538 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function)); in mpt_reply() 539 DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr); in mpt_reply() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | mem.c | 502 int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, in c4iw_reregister_phys_mem() argument 516 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); in c4iw_reregister_phys_mem() 519 if (atomic_read(&mr->usecnt)) in c4iw_reregister_phys_mem() 522 mhp = to_c4iw_mr(mr); in c4iw_reregister_phys_mem() 524 php = to_c4iw_pd(mr->pd); in c4iw_reregister_phys_mem()
|
/linux-4.1.27/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.h | 397 struct ib_mr *mr; member
|
D | ib_srpt.c | 786 list.lkey = sdev->mr->lkey; in srpt_post_recv() 821 list.lkey = sdev->mr->lkey; in srpt_post_send() 1209 sge->lkey = ch->sport->sdev->mr->lkey; in srpt_map_sg_to_ib_sge() 3163 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE); in srpt_add_one() 3164 if (IS_ERR(sdev->mr)) in srpt_add_one() 3264 ib_dereg_mr(sdev->mr); in srpt_add_one() 3310 ib_dereg_mr(sdev->mr); in srpt_remove_one()
|
/linux-4.1.27/arch/x86/platform/uv/ |
D | tlb_uv.c | 249 unsigned long mr; in bau_process_retry_msg() local 257 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; in bau_process_retry_msg() 258 write_mmr_sw_ack(mr); in bau_process_retry_msg() 383 unsigned long mr; in do_reset() local 393 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; in do_reset() 396 write_mmr_sw_ack(mr); in do_reset()
|
/linux-4.1.27/firmware/dsp56k/ |
D | bootstrap.asm | 59 and #<$fe,mr
|
/linux-4.1.27/include/linux/sunrpc/ |
D | svc_rdma.h | 107 struct ib_mr *mr; member
|
/linux-4.1.27/tools/testing/selftests/powerpc/stringloops/ |
D | memcmp_64.S | 81 mr r3,rC
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | iwch_provider.c | 549 static int iwch_reregister_phys_mem(struct ib_mr *mr, in iwch_reregister_phys_mem() argument 566 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); in iwch_reregister_phys_mem() 569 if (atomic_read(&mr->usecnt)) in iwch_reregister_phys_mem() 572 mhp = to_iwch_mr(mr); in iwch_reregister_phys_mem() 574 php = to_iwch_pd(mr->pd); in iwch_reregister_phys_mem()
|
/linux-4.1.27/Documentation/cgroups/ |
D | devices.txt | 23 echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
|