Searched refs:mr (Results 1 – 200 of 237) sorted by relevance

/linux-4.4.14/drivers/staging/rdma/hfi1/
Dmr.c59 struct hfi1_mregion mr; /* must be last */ member
67 static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd, in init_mregion() argument
75 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); in init_mregion()
76 if (!mr->map[i]) in init_mregion()
79 mr->mapsz = m; in init_mregion()
80 init_completion(&mr->comp); in init_mregion()
82 atomic_set(&mr->refcount, 1); in init_mregion()
83 mr->pd = pd; in init_mregion()
84 mr->max_segs = count; in init_mregion()
89 kfree(mr->map[--i]); in init_mregion()
[all …]
Dkeys.c66 int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region) in hfi1_alloc_lkey() argument
72 struct hfi1_ibdev *dev = to_idev(mr->pd->device); in hfi1_alloc_lkey()
75 hfi1_get_mr(mr); in hfi1_alloc_lkey()
84 rcu_assign_pointer(dev->dma_mr, mr); in hfi1_alloc_lkey()
85 mr->lkey_published = 1; in hfi1_alloc_lkey()
87 hfi1_put_mr(mr); in hfi1_alloc_lkey()
112 mr->lkey = (r << (32 - hfi1_lkey_table_size)) | in hfi1_alloc_lkey()
115 if (mr->lkey == 0) { in hfi1_alloc_lkey()
116 mr->lkey |= 1 << 8; in hfi1_alloc_lkey()
119 rcu_assign_pointer(rkt->table[r], mr); in hfi1_alloc_lkey()
[all …]
Dverbs.h330 struct hfi1_mregion *mr; member
342 struct hfi1_mregion mr; /* must be last */ member
966 int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region);
968 void hfi1_free_lkey(struct hfi1_mregion *mr);
1039 static inline void hfi1_get_mr(struct hfi1_mregion *mr) in hfi1_get_mr() argument
1041 atomic_inc(&mr->refcount); in hfi1_get_mr()
1044 static inline void hfi1_put_mr(struct hfi1_mregion *mr) in hfi1_put_mr() argument
1046 if (unlikely(atomic_dec_and_test(&mr->refcount))) in hfi1_put_mr()
1047 complete(&mr->comp); in hfi1_put_mr()
1053 hfi1_put_mr(ss->sge.mr); in hfi1_put_ss()
Dverbs.c297 hfi1_put_mr(sge->mr); in hfi1_copy_sge()
300 } else if (sge->length == 0 && sge->mr->lkey) { in hfi1_copy_sge()
302 if (++sge->m >= sge->mr->mapsz) in hfi1_copy_sge()
307 sge->mr->map[sge->m]->segs[sge->n].vaddr; in hfi1_copy_sge()
309 sge->mr->map[sge->m]->segs[sge->n].length; in hfi1_copy_sge()
338 hfi1_put_mr(sge->mr); in hfi1_skip_sge()
341 } else if (sge->length == 0 && sge->mr->lkey) { in hfi1_skip_sge()
343 if (++sge->m >= sge->mr->mapsz) in hfi1_skip_sge()
348 sge->mr->map[sge->m]->segs[sge->n].vaddr; in hfi1_skip_sge()
350 sge->mr->map[sge->m]->segs[sge->n].length; in hfi1_skip_sge()
[all …]
Drc.c118 if (e->rdma_sge.mr) { in make_rc_ack()
119 hfi1_put_mr(e->rdma_sge.mr); in make_rc_ack()
120 e->rdma_sge.mr = NULL; in make_rc_ack()
150 if (len && !e->rdma_sge.mr) { in make_rc_ack()
155 qp->s_rdma_mr = e->rdma_sge.mr; in make_rc_ack()
194 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; in make_rc_ack()
1031 hfi1_put_mr(sge->mr); in hfi1_rc_send_complete()
1088 hfi1_put_mr(sge->mr); in do_rc_completion()
1732 if (e->rdma_sge.mr) { in rc_rcv_error()
1733 hfi1_put_mr(e->rdma_sge.mr); in rc_rcv_error()
[all …]
Druc.c132 hfi1_put_mr(sge->mr); in init_sge()
527 hfi1_put_mr(qp->r_sge.sge.mr); in ruc_loopback()
551 hfi1_put_mr(sge->mr); in ruc_loopback()
554 } else if (sge->length == 0 && sge->mr->lkey) { in ruc_loopback()
556 if (++sge->m >= sge->mr->mapsz) in ruc_loopback()
561 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ruc_loopback()
563 sge->mr->map[sge->m]->segs[sge->n].length; in ruc_loopback()
910 hfi1_put_mr(sge->mr); in hfi1_send_complete()
DMakefile11 init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \
Dud.c213 } else if (sge->length == 0 && sge->mr->lkey) { in ud_loopback()
215 if (++sge->m >= sge->mr->mapsz) in ud_loopback()
220 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ud_loopback()
222 sge->mr->map[sge->m]->segs[sge->n].length; in ud_loopback()
Dqp.c420 hfi1_put_mr(sge->mr); in clear_mr_refs()
442 e->rdma_sge.mr) { in clear_mr_refs()
443 hfi1_put_mr(e->rdma_sge.mr); in clear_mr_refs()
444 e->rdma_sge.mr = NULL; in clear_mr_refs()
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_mr.c42 struct qib_mregion mr; /* must be last */ member
50 static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, in init_qib_mregion() argument
58 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); in init_qib_mregion()
59 if (!mr->map[i]) in init_qib_mregion()
62 mr->mapsz = m; in init_qib_mregion()
63 init_completion(&mr->comp); in init_qib_mregion()
65 atomic_set(&mr->refcount, 1); in init_qib_mregion()
66 mr->pd = pd; in init_qib_mregion()
67 mr->max_segs = count; in init_qib_mregion()
72 kfree(mr->map[--i]); in init_qib_mregion()
[all …]
Dqib_keys.c49 int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) in qib_alloc_lkey() argument
55 struct qib_ibdev *dev = to_idev(mr->pd->device); in qib_alloc_lkey()
66 qib_get_mr(mr); in qib_alloc_lkey()
67 rcu_assign_pointer(dev->dma_mr, mr); in qib_alloc_lkey()
68 mr->lkey_published = 1; in qib_alloc_lkey()
93 mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | in qib_alloc_lkey()
96 if (mr->lkey == 0) { in qib_alloc_lkey()
97 mr->lkey |= 1 << 8; in qib_alloc_lkey()
100 qib_get_mr(mr); in qib_alloc_lkey()
101 rcu_assign_pointer(rkt->table[r], mr); in qib_alloc_lkey()
[all …]
Dqib_verbs.c187 qib_put_mr(sge->mr); in qib_copy_sge()
190 } else if (sge->length == 0 && sge->mr->lkey) { in qib_copy_sge()
192 if (++sge->m >= sge->mr->mapsz) in qib_copy_sge()
197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_sge()
199 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_sge()
228 qib_put_mr(sge->mr); in qib_skip_sge()
231 } else if (sge->length == 0 && sge->mr->lkey) { in qib_skip_sge()
233 if (++sge->m >= sge->mr->mapsz) in qib_skip_sge()
238 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_skip_sge()
240 sge->mr->map[sge->m]->segs[sge->n].length; in qib_skip_sge()
[all …]
Dqib_verbs.h320 struct qib_mregion *mr; member
334 struct qib_mregion mr; /* must be last */ member
989 int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
991 void qib_free_lkey(struct qib_mregion *mr);
1065 static inline void qib_get_mr(struct qib_mregion *mr) in qib_get_mr() argument
1067 atomic_inc(&mr->refcount); in qib_get_mr()
1072 static inline void qib_put_mr(struct qib_mregion *mr) in qib_put_mr() argument
1074 if (unlikely(atomic_dec_and_test(&mr->refcount))) in qib_put_mr()
1075 call_rcu(&mr->list, mr_rcu_callback); in qib_put_mr()
1081 qib_put_mr(ss->sge.mr); in qib_put_ss()
Dqib_rc.c97 if (e->rdma_sge.mr) { in qib_make_rc_ack()
98 qib_put_mr(e->rdma_sge.mr); in qib_make_rc_ack()
99 e->rdma_sge.mr = NULL; in qib_make_rc_ack()
129 if (len && !e->rdma_sge.mr) { in qib_make_rc_ack()
134 qp->s_rdma_mr = e->rdma_sge.mr; in qib_make_rc_ack()
173 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; in qib_make_rc_ack()
1017 qib_put_mr(sge->mr); in qib_rc_send_complete()
1073 qib_put_mr(sge->mr); in do_rc_completion()
1734 if (e->rdma_sge.mr) { in qib_rc_rcv_error()
1735 qib_put_mr(e->rdma_sge.mr); in qib_rc_rcv_error()
[all …]
Dqib_ruc.c114 qib_put_mr(sge->mr); in qib_init_sge()
505 qib_put_mr(qp->r_sge.sge.mr); in qib_ruc_loopback()
529 qib_put_mr(sge->mr); in qib_ruc_loopback()
532 } else if (sge->length == 0 && sge->mr->lkey) { in qib_ruc_loopback()
534 if (++sge->m >= sge->mr->mapsz) in qib_ruc_loopback()
539 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ruc_loopback()
541 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ruc_loopback()
783 qib_put_mr(sge->mr); in qib_send_complete()
Dqib_ud.c191 } else if (sge->length == 0 && sge->mr->lkey) { in qib_ud_loopback()
193 if (++sge->m >= sge->mr->mapsz) in qib_ud_loopback()
198 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ud_loopback()
200 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ud_loopback()
Dqib_sdma.c623 } else if (sge->length == 0 && sge->mr->lkey) { in qib_sdma_verbs_send()
625 if (++sge->m >= sge->mr->mapsz) in qib_sdma_verbs_send()
630 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_sdma_verbs_send()
632 sge->mr->map[sge->m]->segs[sge->n].length; in qib_sdma_verbs_send()
Dqib_qp.c435 qib_put_mr(sge->mr); in clear_mr_refs()
457 e->rdma_sge.mr) { in clear_mr_refs()
458 qib_put_mr(e->rdma_sge.mr); in clear_mr_refs()
459 e->rdma_sge.mr = NULL; in clear_mr_refs()
/linux-4.4.14/drivers/staging/rdma/ipath/
Dipath_mr.c46 struct ipath_mregion mr; /* must be last */ member
65 struct ipath_mr *mr; in ipath_get_dma_mr() local
68 mr = kzalloc(sizeof *mr, GFP_KERNEL); in ipath_get_dma_mr()
69 if (!mr) { in ipath_get_dma_mr()
74 mr->mr.access_flags = acc; in ipath_get_dma_mr()
75 ret = &mr->ibmr; in ipath_get_dma_mr()
84 struct ipath_mr *mr; in alloc_mr() local
89 mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); in alloc_mr()
90 if (!mr) in alloc_mr()
95 mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); in alloc_mr()
[all …]
Dipath_keys.c47 int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) in ipath_alloc_lkey() argument
75 mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) | in ipath_alloc_lkey()
78 if (mr->lkey == 0) { in ipath_alloc_lkey()
79 mr->lkey |= 1 << 8; in ipath_alloc_lkey()
82 rkt->table[r] = mr; in ipath_alloc_lkey()
125 struct ipath_mregion *mr; in ipath_lkey_ok() local
142 isge->mr = NULL; in ipath_lkey_ok()
149 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; in ipath_lkey_ok()
150 if (unlikely(mr == NULL || mr->lkey != sge->lkey || in ipath_lkey_ok()
151 qp->ibqp.pd != mr->pd)) { in ipath_lkey_ok()
[all …]
Dipath_verbs.c190 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_copy_sge()
192 if (++sge->m >= sge->mr->mapsz) in ipath_copy_sge()
197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_copy_sge()
199 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_copy_sge()
229 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_skip_sge()
231 if (++sge->m >= sge->mr->mapsz) in ipath_skip_sge()
236 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_skip_sge()
238 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_skip_sge()
276 } else if (sge.length == 0 && sge.mr != NULL) { in ipath_count_sge()
278 if (++sge.m >= sge.mr->mapsz) in ipath_count_sge()
[all …]
Dipath_ud.c199 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_ud_loopback()
201 if (++sge->m >= sge->mr->mapsz) in ipath_ud_loopback()
206 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ud_loopback()
208 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ud_loopback()
Dipath_ruc.c414 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_ruc_loopback()
416 if (++sge->m >= sge->mr->mapsz) in ipath_ruc_loopback()
421 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ruc_loopback()
423 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ruc_loopback()
Dipath_sdma.c765 } else if (sge->length == 0 && sge->mr != NULL) { in ipath_sdma_verbs_send()
767 if (++sge->m >= sge->mr->mapsz) in ipath_sdma_verbs_send()
772 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_sdma_verbs_send()
774 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_sdma_verbs_send()
Dipath_verbs.h259 struct ipath_mregion *mr; member
271 struct ipath_mregion mr; /* must be last */ member
789 struct ipath_mregion *mr);
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
Dgddr5.c75 ram->mr[0] &= ~0xf7f; in nvkm_gddr5_calc()
76 ram->mr[0] |= (WR & 0x0f) << 8; in nvkm_gddr5_calc()
77 ram->mr[0] |= (CL & 0x0f) << 3; in nvkm_gddr5_calc()
78 ram->mr[0] |= (WL & 0x07) << 0; in nvkm_gddr5_calc()
80 ram->mr[1] &= ~0x0bf; in nvkm_gddr5_calc()
81 ram->mr[1] |= (xd & 0x01) << 7; in nvkm_gddr5_calc()
82 ram->mr[1] |= (at[0] & 0x03) << 4; in nvkm_gddr5_calc()
83 ram->mr[1] |= (dt & 0x03) << 2; in nvkm_gddr5_calc()
84 ram->mr[1] |= (ds & 0x03) << 0; in nvkm_gddr5_calc()
89 ram->mr1_nuts = ram->mr[1]; in nvkm_gddr5_calc()
[all …]
Dsddr3.c91 ODT = (ram->mr[1] & 0x004) >> 2 | in nvkm_sddr3_calc()
92 (ram->mr[1] & 0x040) >> 5 | in nvkm_sddr3_calc()
93 (ram->mr[1] & 0x200) >> 7; in nvkm_sddr3_calc()
105 ram->mr[0] &= ~0xf74; in nvkm_sddr3_calc()
106 ram->mr[0] |= (WR & 0x07) << 9; in nvkm_sddr3_calc()
107 ram->mr[0] |= (CL & 0x0e) << 3; in nvkm_sddr3_calc()
108 ram->mr[0] |= (CL & 0x01) << 2; in nvkm_sddr3_calc()
110 ram->mr[1] &= ~0x245; in nvkm_sddr3_calc()
111 ram->mr[1] |= (ODT & 0x1) << 2; in nvkm_sddr3_calc()
112 ram->mr[1] |= (ODT & 0x2) << 5; in nvkm_sddr3_calc()
[all …]
Dgddr3.c89 DLL = !(ram->mr[1] & 0x1); in nvkm_gddr3_calc()
90 RON = !(ram->mr[1] & 0x300) >> 8; in nvkm_gddr3_calc()
98 ODT = (ram->mr[1] & 0xc) >> 2; in nvkm_gddr3_calc()
101 hi = ram->mr[2] & 0x1; in nvkm_gddr3_calc()
107 ram->mr[0] &= ~0xf74; in nvkm_gddr3_calc()
108 ram->mr[0] |= (CWL & 0x07) << 9; in nvkm_gddr3_calc()
109 ram->mr[0] |= (CL & 0x07) << 4; in nvkm_gddr3_calc()
110 ram->mr[0] |= (CL & 0x08) >> 1; in nvkm_gddr3_calc()
112 ram->mr[1] &= ~0x3fc; in nvkm_gddr3_calc()
113 ram->mr[1] |= (ODT & 0x03) << 2; in nvkm_gddr3_calc()
[all …]
Dsddr2.c81 ODT = (ram->mr[1] & 0x004) >> 2 | in nvkm_sddr2_calc()
82 (ram->mr[1] & 0x040) >> 5; in nvkm_sddr2_calc()
90 ram->mr[0] &= ~0xf70; in nvkm_sddr2_calc()
91 ram->mr[0] |= (WR & 0x07) << 9; in nvkm_sddr2_calc()
92 ram->mr[0] |= (CL & 0x07) << 4; in nvkm_sddr2_calc()
94 ram->mr[1] &= ~0x045; in nvkm_sddr2_calc()
95 ram->mr[1] |= (ODT & 0x1) << 2; in nvkm_sddr2_calc()
96 ram->mr[1] |= (ODT & 0x2) << 5; in nvkm_sddr2_calc()
97 ram->mr[1] |= !DLL; in nvkm_sddr2_calc()
Dramnv50.c186 ram_mask(hwsq, mr[0], 0x100, 0x100); in nvkm_sddr2_dll_reset()
187 ram_mask(hwsq, mr[0], 0x100, 0x000); in nvkm_sddr2_dll_reset()
287 ram->base.mr[0] = ram_rd32(hwsq, mr[0]); in nv50_ram_calc()
288 ram->base.mr[1] = ram_rd32(hwsq, mr[1]); in nv50_ram_calc()
289 ram->base.mr[2] = ram_rd32(hwsq, mr[2]); in nv50_ram_calc()
377 ram_nuke(hwsq, mr[0]); /* force update */ in nv50_ram_calc()
378 ram_mask(hwsq, mr[0], 0x000, 0x000); in nv50_ram_calc()
381 ram_nuke(hwsq, mr[1]); /* force update */ in nv50_ram_calc()
382 ram_wr32(hwsq, mr[1], ram->base.mr[1]); in nv50_ram_calc()
383 ram_nuke(hwsq, mr[0]); /* force update */ in nv50_ram_calc()
[all …]
Dramgt215.c426 ram_mask(fuc, mr[0], 0x100, 0x100); in nvkm_sddr2_dll_reset()
428 ram_mask(fuc, mr[0], 0x100, 0x000); in nvkm_sddr2_dll_reset()
433 nvkm_sddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr) in nvkm_sddr3_dll_disable() argument
435 u32 mr1_old = ram_rd32(fuc, mr[1]); in nvkm_sddr3_dll_disable()
439 ram_wr32(fuc, mr[1], mr[1]); in nvkm_sddr3_dll_disable()
445 nvkm_gddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr) in nvkm_gddr3_dll_disable() argument
447 u32 mr1_old = ram_rd32(fuc, mr[1]); in nvkm_gddr3_dll_disable()
450 ram_wr32(fuc, mr[1], mr[1]); in nvkm_gddr3_dll_disable()
564 ram->base.mr[0] = ram_rd32(fuc, mr[0]); in gt215_ram_calc()
565 ram->base.mr[1] = ram_rd32(fuc, mr[1]); in gt215_ram_calc()
[all …]
Dramgk104.c265 if ((ram->base.mr[1] & 0x03c) != 0x030) { in gk104_ram_calc_gddr5()
266 ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c); in gk104_ram_calc_gddr5()
267 ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000); in gk104_ram_calc_gddr5()
590 ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]); in gk104_ram_calc_gddr5()
591 ram_wr32(fuc, mr[0], ram->base.mr[0]); in gk104_ram_calc_gddr5()
592 ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]); in gk104_ram_calc_gddr5()
594 ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]); in gk104_ram_calc_gddr5()
595 ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */ in gk104_ram_calc_gddr5()
596 ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]); in gk104_ram_calc_gddr5()
597 ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]); in gk104_ram_calc_gddr5()
[all …]
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dmr.c59 struct mlx4_ib_mr *mr; in mlx4_ib_get_dma_mr() local
62 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_get_dma_mr()
63 if (!mr) in mlx4_ib_get_dma_mr()
67 ~0ull, convert_access(acc), 0, 0, &mr->mmr); in mlx4_ib_get_dma_mr()
71 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
75 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_get_dma_mr()
76 mr->umem = NULL; in mlx4_ib_get_dma_mr()
78 return &mr->ibmr; in mlx4_ib_get_dma_mr()
81 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
84 kfree(mr); in mlx4_ib_get_dma_mr()
[all …]
DMakefile3 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
Dmlx4_ib.h705 int mlx4_ib_dereg_mr(struct ib_mr *mr);
864 int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
/linux-4.4.14/drivers/scsi/
Dmesh.c304 volatile struct mesh_regs __iomem *mr = ms->mesh; in mesh_dump_regs() local
310 ms, mr, md); in mesh_dump_regs()
313 (mr->count_hi << 8) + mr->count_lo, mr->sequence, in mesh_dump_regs()
314 (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count, in mesh_dump_regs()
315 mr->exception, mr->error, mr->intr_mask, mr->interrupt, in mesh_dump_regs()
316 mr->sync_params); in mesh_dump_regs()
317 while(in_8(&mr->fifo_count)) in mesh_dump_regs()
318 printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo)); in mesh_dump_regs()
338 static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr) in mesh_flush_io() argument
340 (void)in_8(&mr->mesh_id); in mesh_flush_io()
[all …]
Dqla1280.c1145 uint8_t mr; in qla1280_set_target_parameters() local
1152 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0; in qla1280_set_target_parameters()
1171 mr |= BIT_6; in qla1280_set_target_parameters()
1177 status = qla1280_mailbox_command(ha, mr, mb); in qla1280_set_target_parameters()
2459 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) in qla1280_mailbox_command() argument
2485 if (mr & BIT_0) { in qla1280_mailbox_command()
2489 mr >>= 1; in qla1280_mailbox_command()
2532 mr = MAILBOX_REGISTER_COUNT; in qla1280_mailbox_command()
DNCR5380.c410 unsigned char status, data, basr, mr, icr, i; in NCR5380_print() local
415 mr = NCR5380_read(MODE_REG); in NCR5380_print()
431 printk("\nMODE: %02x ", mr); in NCR5380_print()
433 if (mr & mrs[i].mask) in NCR5380_print()
/linux-4.4.14/drivers/infiniband/hw/mlx5/
Dmr.c56 static int clean_mr(struct mlx5_ib_mr *mr);
58 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
60 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); in destroy_mkey()
82 struct mlx5_ib_mr *mr = context; in reg_mr_callback() local
83 struct mlx5_ib_dev *dev = mr->dev; in reg_mr_callback()
85 int c = order2idx(dev, mr->order); in reg_mr_callback()
97 kfree(mr); in reg_mr_callback()
103 if (mr->out.hdr.status) { in reg_mr_callback()
105 mr->out.hdr.status, in reg_mr_callback()
106 be32_to_cpu(mr->out.hdr.syndrome)); in reg_mr_callback()
[all …]
Dodp.c49 struct mlx5_ib_mr *mr; in mlx5_ib_invalidate_range() local
60 mr = umem->odp_data->private; in mlx5_ib_invalidate_range()
62 if (!mr || !mr->ibmr.pd) in mlx5_ib_invalidate_range()
93 mlx5_ib_update_mtt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
100 mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1, in mlx5_ib_invalidate_range()
146 struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr); in mlx5_ib_odp_find_mr_lkey() local
148 if (!mmr || mmr->key != key || !mr->live) in mlx5_ib_odp_find_mr_lkey()
188 struct mlx5_ib_mr *mr; in pagefault_single_data_segment() local
192 mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key); in pagefault_single_data_segment()
198 if (!mr || !mr->ibmr.pd) { in pagefault_single_data_segment()
[all …]
DMakefile3 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
Dcq.c421 struct mlx5_ib_mr *mr; in mlx5_poll_one() local
535 mr = to_mibmr(mmr); in mlx5_poll_one()
536 get_sig_err_item(sig_err_cqe, &mr->sig->err_item); in mlx5_poll_one()
537 mr->sig->sig_err_exists = true; in mlx5_poll_one()
538 mr->sig->sigerr_count++; in mlx5_poll_one()
541 cq->mcq.cqn, mr->sig->err_item.key, in mlx5_poll_one()
542 mr->sig->err_item.err_type, in mlx5_poll_one()
543 mr->sig->err_item.sig_err_offset, in mlx5_poll_one()
544 mr->sig->err_item.expected, in mlx5_poll_one()
545 mr->sig->err_item.actual); in mlx5_poll_one()
Dqp.c1900 struct mlx5_ib_mr *mr) in set_reg_umr_seg() argument
1902 int ndescs = mr->ndescs; in set_reg_umr_seg()
1993 struct mlx5_ib_mr *mr, in set_reg_mkey_seg() argument
1996 int ndescs = ALIGN(mr->ndescs, 8) >> 1; in set_reg_mkey_seg()
2002 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
2003 seg->len = cpu_to_be64(mr->ibmr.length); in set_reg_mkey_seg()
2005 seg->log2_page_size = ilog2(mr->ibmr.page_size); in set_reg_mkey_seg()
2036 struct mlx5_ib_mr *mr, in set_reg_data_seg() argument
2039 int bcount = mr->desc_size * mr->ndescs; in set_reg_data_seg()
2041 dseg->addr = cpu_to_be64(mr->desc_map); in set_reg_data_seg()
[all …]
/linux-4.4.14/arch/powerpc/platforms/pseries/
DhvCall.S65 mr r4,r3; \
66 mr r3,r0; \
78 mr r5,BUFREG; \
140 mr r4,r5
141 mr r5,r6
142 mr r6,r7
143 mr r7,r8
144 mr r8,r9
145 mr r9,r10
165 mr r0,r4
[all …]
/linux-4.4.14/net/rds/
Drdma.c71 struct rds_mr *mr; in rds_mr_tree_walk() local
75 mr = rb_entry(parent, struct rds_mr, r_rb_node); in rds_mr_tree_walk()
77 if (key < mr->r_key) in rds_mr_tree_walk()
79 else if (key > mr->r_key) in rds_mr_tree_walk()
82 return mr; in rds_mr_tree_walk()
96 static void rds_destroy_mr(struct rds_mr *mr) in rds_destroy_mr() argument
98 struct rds_sock *rs = mr->r_sock; in rds_destroy_mr()
103 mr->r_key, atomic_read(&mr->r_refcount)); in rds_destroy_mr()
105 if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state)) in rds_destroy_mr()
109 if (!RB_EMPTY_NODE(&mr->r_rb_node)) in rds_destroy_mr()
[all …]
Diw_rdma.c49 struct ib_mr *mr; member
606 *key_ret = ibmr->mr->rkey;
645 struct ib_mr *mr; local
648 mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
650 if (IS_ERR(mr)) {
651 err = PTR_ERR(mr);
657 ibmr->mr = mr;
669 n = ib_map_mr_sg_zbva(ibmr->mr, m_sg->list, m_sg->len, PAGE_SIZE);
677 reg_wr.mr = ibmr->mr;
689 ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
[all …]
Diw.c94 rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd, in rds_iw_add_one()
98 if (IS_ERR(rds_iwdev->mr)) in rds_iw_add_one()
101 rds_iwdev->mr = NULL; in rds_iw_add_one()
118 if (rds_iwdev->mr) in rds_iw_add_one()
119 ib_dereg_mr(rds_iwdev->mr); in rds_iw_add_one()
148 if (rds_iwdev->mr) in rds_iw_remove_one()
149 ib_dereg_mr(rds_iwdev->mr); in rds_iw_remove_one()
Drds.h767 void __rds_put_mr_final(struct rds_mr *mr);
768 static inline void rds_mr_put(struct rds_mr *mr) in rds_mr_put() argument
770 if (atomic_dec_and_test(&mr->r_refcount)) in rds_mr_put()
771 __rds_put_mr_final(mr); in rds_mr_put()
Diw.h187 struct ib_mr *mr; member
/linux-4.4.14/arch/x86/mm/
Dinit.c189 static int __meminit save_mr(struct map_range *mr, int nr_range, in save_mr() argument
196 mr[nr_range].start = start_pfn<<PAGE_SHIFT; in save_mr()
197 mr[nr_range].end = end_pfn<<PAGE_SHIFT; in save_mr()
198 mr[nr_range].page_size_mask = page_size_mask; in save_mr()
209 static void __init_refok adjust_range_page_size_mask(struct map_range *mr, in adjust_range_page_size_mask() argument
216 !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) { in adjust_range_page_size_mask()
217 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask()
218 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask()
226 mr[i].page_size_mask |= 1<<PG_LEVEL_2M; in adjust_range_page_size_mask()
229 !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) { in adjust_range_page_size_mask()
[all …]
/linux-4.4.14/drivers/rtc/
Drtc-at91sam9.c141 u32 offset, alarm, mr; in at91_rtc_settime() local
152 mr = rtt_readl(rtc, MR); in at91_rtc_settime()
155 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); in at91_rtc_settime()
176 mr &= ~AT91_RTT_ALMIEN; in at91_rtc_settime()
182 rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST); in at91_rtc_settime()
219 u32 mr; in at91_rtc_setalarm() local
231 mr = rtt_readl(rtc, MR); in at91_rtc_setalarm()
232 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); in at91_rtc_setalarm()
243 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); in at91_rtc_setalarm()
255 u32 mr = rtt_readl(rtc, MR); in at91_rtc_alarm_irq_enable() local
[all …]
/linux-4.4.14/drivers/sh/intc/
Dhandle.c44 struct intc_mask_reg *mr = desc->hw.mask_regs; in _intc_mask_data() local
48 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { in _intc_mask_data()
49 mr = desc->hw.mask_regs + *reg_idx; in _intc_mask_data()
51 for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { in _intc_mask_data()
52 if (mr->enum_ids[*fld_idx] != enum_id) in _intc_mask_data()
55 if (mr->set_reg && mr->clr_reg) { in _intc_mask_data()
58 reg_e = mr->clr_reg; in _intc_mask_data()
59 reg_d = mr->set_reg; in _intc_mask_data()
62 if (mr->set_reg) { in _intc_mask_data()
64 reg_e = mr->set_reg; in _intc_mask_data()
[all …]
Dbalancing.c44 struct intc_mask_reg *mr = desc->hw.mask_regs; in intc_dist_data() local
48 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) { in intc_dist_data()
49 mr = desc->hw.mask_regs + i; in intc_dist_data()
55 if (!mr->dist_reg) in intc_dist_data()
58 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { in intc_dist_data()
59 if (mr->enum_ids[j] != enum_id) in intc_dist_data()
64 reg_e = mr->dist_reg; in intc_dist_data()
65 reg_d = mr->dist_reg; in intc_dist_data()
67 fn += (mr->reg_width >> 3) - 1; in intc_dist_data()
72 (mr->reg_width - 1) - j); in intc_dist_data()
/linux-4.4.14/net/ipv4/netfilter/
Dipt_MASQUERADE.c34 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in masquerade_tg_check() local
36 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { in masquerade_tg_check()
40 if (mr->rangesize != 1) { in masquerade_tg_check()
41 pr_debug("bad rangesize %u\n", mr->rangesize); in masquerade_tg_check()
51 const struct nf_nat_ipv4_multi_range_compat *mr; in masquerade_tg() local
53 mr = par->targinfo; in masquerade_tg()
54 range.flags = mr->range[0].flags; in masquerade_tg()
55 range.min_proto = mr->range[0].min; in masquerade_tg()
56 range.max_proto = mr->range[0].max; in masquerade_tg()
Dnft_redir_ipv4.c25 struct nf_nat_ipv4_multi_range_compat mr; in nft_redir_ipv4_eval() local
27 memset(&mr, 0, sizeof(mr)); in nft_redir_ipv4_eval()
29 mr.range[0].min.all = in nft_redir_ipv4_eval()
31 mr.range[0].max.all = in nft_redir_ipv4_eval()
33 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; in nft_redir_ipv4_eval()
36 mr.range[0].flags |= priv->flags; in nft_redir_ipv4_eval()
38 regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, in nft_redir_ipv4_eval()
/linux-4.4.14/net/mac80211/
Drc80211_minstrel.c73 int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma) in minstrel_get_tp_avg() argument
77 usecs = mr->perfect_tx_time; in minstrel_get_tp_avg()
82 if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100)) in minstrel_get_tp_avg()
202 struct minstrel_rate *mr = &mi->r[i]; in minstrel_update_stats() local
213 mr->adjusted_retry_count = mrs->retry_count >> 1; in minstrel_update_stats()
214 if (mr->adjusted_retry_count > 2) in minstrel_update_stats()
215 mr->adjusted_retry_count = 2; in minstrel_update_stats()
216 mr->sample_limit = 4; in minstrel_update_stats()
218 mr->sample_limit = -1; in minstrel_update_stats()
219 mr->adjusted_retry_count = mrs->retry_count; in minstrel_update_stats()
[all …]
Drc80211_minstrel_debugfs.c94 struct minstrel_rate *mr = &mi->r[i]; in minstrel_stats_open() local
103 p += sprintf(p, " %3u%s ", mr->bitrate / 2, in minstrel_stats_open()
104 (mr->bitrate & 1 ? ".5" : " ")); in minstrel_stats_open()
106 p += sprintf(p, "%6u ", mr->perfect_tx_time); in minstrel_stats_open()
108 tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100)); in minstrel_stats_open()
109 tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma); in minstrel_stats_open()
162 struct minstrel_rate *mr = &mi->r[i]; in minstrel_stats_csv_open() local
171 p += sprintf(p, ",%u%s", mr->bitrate / 2, in minstrel_stats_csv_open()
172 (mr->bitrate & 1 ? ".5," : ",")); in minstrel_stats_csv_open()
174 p += sprintf(p, "%u,",mr->perfect_tx_time); in minstrel_stats_csv_open()
[all …]
Drc80211_minstrel.h162 int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Dmr.c421 int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc_reserved() argument
423 mr->iova = iova; in mlx4_mr_alloc_reserved()
424 mr->size = size; in mlx4_mr_alloc_reserved()
425 mr->pd = pd; in mlx4_mr_alloc_reserved()
426 mr->access = access; in mlx4_mr_alloc_reserved()
427 mr->enabled = MLX4_MPT_DISABLED; in mlx4_mr_alloc_reserved()
428 mr->key = hw_index_to_key(mridx); in mlx4_mr_alloc_reserved()
430 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
530 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
540 access, npages, page_shift, mr); in mlx4_mr_alloc()
[all …]
Den_main.c237 (void) mlx4_mr_free(dev, &mdev->mr); in mlx4_en_remove()
300 0, 0, &mdev->mr)) { in mlx4_en_add()
304 if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { in mlx4_en_add()
338 (void) mlx4_mr_free(dev, &mdev->mr); in mlx4_en_add()
DMakefile4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
/linux-4.4.14/arch/powerpc/kernel/
Dhead_64.S127 mr r24,r3
129 mr r25,r4
151 mr r3,r24
157 mr r4,r25
222 mr r24,r3
233 mr r3,r24
249 mr r24,r3
250 mr r25,r4
261 mr r3,r24
262 mr r4,r25
[all …]
Dmisc_32.S49 mr r1,r3
69 mr r1,r4
86 mr r10,r4
94 mr r10,r3
360 mr r6,r3
460 mr r6,r3
506 mr r6,r3
683 mr r3,r9
684 mr r4,r10
722 mr r29, r3
[all …]
Dmisc_64.S37 mr r1,r3
48 mr r1,r4
226 mr r6,r3
606 mr r1,r3
625 mr r31,r3 /* newstack (both) */
626 mr r30,r4 /* start (real) */
627 mr r29,r5 /* image (virt) */
628 mr r28,r6 /* control, unused */
629 mr r27,r7 /* clear_all() fn desc */
630 mr r26,r8 /* spare */
[all …]
Dhead_fsl_booke.S69 mr r30,r3
70 mr r31,r4
81 mr r23,r3
82 mr r25,r4
250 mr r3,r30
251 mr r4,r31
253 mr r5,r23
254 mr r6,r25
256 mr r5,r25
275 mr r3,r30
[all …]
Dhead_booke.h43 mr r11, r1; \
63 mr r1, r11; \
151 mr r11,r8; \
164 mr r1,r11; \
387 mr r4,r12; /* Pass SRR0 as arg2 */ \
Dentry_64.S63 mr r10,r1
255 mr r0,r3
421 mr r3,r15
423 mr r12,r14
576 mr r1,r8 /* start using new stack pointer */
705 mr r30,r4
707 mr r4,r30
721 mr r4,r1 /* src: current exception frame */
722 mr r1,r3 /* Reroute the trampoline frame to r1 */
1295 mr r31, r1
Dhead_32.S142 1: mr r31,r3 /* save device tree ptr */
191 mr r26,r3
230 mr r24,r3 /* cpu # */
409 mr r4,r12 /* SRR0 is fault address */
411 1: mr r4,r12
412 mr r5,r9
776 4: mr r5,r25
811 mr r24, r3 /* cpu # */
839 mr r4,r24
967 mr r4,r31
Dhead_8xx.S103 mr r31,r3 /* save device tree ptr */
470 mr r4,r12
471 mr r5,r9
680 mr r4,r31
756 mr r8, r10
799 mr r8, r9 /* Create vaddr for TLB */
805 mr r8, r9 /* Create paddr for TLB */
Dentry_32.S350 mr r6,r3
435 mr r3,r15
453 mr r0,r3
576 mr r5,r3
649 mr r3,r2
712 mr r12,r4 /* restart at exc_exit_restart */
787 mr r4,r1 /* src: current exception frame */
788 mr r1,r3 /* Reroute the trampoline frame to r1 */
1194 mr r4,r9
1217 mr r12,r11 /* restart at exc_exit_restart */
[all …]
Dexceptions-64e.S587 mr r14,r10
703 mr r9,r13 /* keep a copy of userland r13 */
774 mr r4,r14
839 mr r4,r14
1013 mr r4,r14
1014 mr r5,r15
1022 mr r5,r3
1056 1: mr r0,r13
1241 1: mr r7,r3 /* Set MAS0(TLBSEL) */
1268 mr r4,r3 /* Set MAS0(TLBSEL) = 1 */
Dhead_40x.S61 mr r31,r3 /* save device tree ptr */
395 mr r4,r12 /* Pass SRR0 as arg2 */
520 mr r11, r12
620 mr r11, r12
850 mr r4,r31
Dmisc.S115 mr r3,r4
Dexceptions-64s.S37 mr r9,r13 ; \
478 mr r11,r1 /* Save r1 */
1280 mr r10,r1 /* Save r1 */
1604 mr r5,r3
1624 mr r5,r3
1638 mr r4,r3
Dhead_44x.S64 mr r31,r3 /* save device tree ptr */
205 mr r4,r31
896 mr r4,r25
1002 mr r24,r3 /* CPU number */
Dfsl_booke_entry_mapping.S213 mr r6, r25
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
Dmr.c52 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, in mlx5_core_create_mkey() argument
86 mr->iova = be64_to_cpu(in->seg.start_addr); in mlx5_core_create_mkey()
87 mr->size = be64_to_cpu(in->seg.len); in mlx5_core_create_mkey()
88 mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; in mlx5_core_create_mkey()
89 mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; in mlx5_core_create_mkey()
92 be32_to_cpu(lout.mkey), key, mr->key); in mlx5_core_create_mkey()
96 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); in mlx5_core_create_mkey()
100 mlx5_base_mkey(mr->key), err); in mlx5_core_create_mkey()
101 mlx5_core_destroy_mkey(dev, mr); in mlx5_core_create_mkey()
108 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) in mlx5_core_destroy_mkey() argument
[all …]
DMakefile4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
Den_main.c979 c->mkey_be = cpu_to_be32(priv->mr.key); in mlx5e_open_channel()
2078 struct mlx5_core_mr *mr) in mlx5e_create_mkey() argument
2094 err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL, in mlx5e_create_mkey()
2143 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); in mlx5e_create_netdev()
2217 mlx5_core_destroy_mkey(mdev, &priv->mr); in mlx5e_create_netdev()
2251 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); in mlx5e_destroy_netdev()
Den.h491 struct mlx5_core_mr mr; member
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.c778 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, in ocrdma_alloc_lkey() argument
783 mr->hwmr.fr_mr = 0; in ocrdma_alloc_lkey()
784 mr->hwmr.local_rd = 1; in ocrdma_alloc_lkey()
785 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; in ocrdma_alloc_lkey()
786 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; in ocrdma_alloc_lkey()
787 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; in ocrdma_alloc_lkey()
788 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; in ocrdma_alloc_lkey()
789 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; in ocrdma_alloc_lkey()
790 mr->hwmr.num_pbls = num_pbls; in ocrdma_alloc_lkey()
792 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check); in ocrdma_alloc_lkey()
[all …]
/linux-4.4.14/drivers/infiniband/hw/mthca/
Dmthca_mr.c430 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) in mthca_mr_alloc() argument
444 mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); in mthca_mr_alloc()
463 if (!mr->mtt) in mthca_mr_alloc()
475 if (mr->mtt) in mthca_mr_alloc()
478 mr->mtt->first_seg * dev->limits.mtt_seg_size); in mthca_mr_alloc()
481 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); in mthca_mr_alloc()
513 u32 access, struct mthca_mr *mr) in mthca_mr_alloc_notrans() argument
515 mr->mtt = NULL; in mthca_mr_alloc_notrans()
516 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); in mthca_mr_alloc_notrans()
522 u32 access, struct mthca_mr *mr) in mthca_mr_alloc_phys() argument
[all …]
Dmthca_provider.c469 srq->mr.ibmr.lkey = ucmd.lkey; in mthca_create_srq()
564 qp->mr.ibmr.lkey = ucmd.lkey; in mthca_create_qp()
689 cq->buf.mr.ibmr.lkey = ucmd.lkey; in mthca_create_cq()
798 lkey = cq->resize_buf->buf.mr.ibmr.lkey; in mthca_resize_cq()
880 struct mthca_mr *mr; in mthca_get_dma_mr() local
883 mr = kmalloc(sizeof *mr, GFP_KERNEL); in mthca_get_dma_mr()
884 if (!mr) in mthca_get_dma_mr()
889 convert_access(acc), mr); in mthca_get_dma_mr()
892 kfree(mr); in mthca_get_dma_mr()
896 mr->umem = NULL; in mthca_get_dma_mr()
[all …]
Dmthca_provider.h115 struct mthca_mr mr; member
187 struct mthca_mr mr; member
240 struct mthca_mr mr; member
273 struct mthca_mr mr; member
Dmthca_allocator.c196 int hca_write, struct mthca_mr *mr) in mthca_buf_alloc() argument
265 mr); in mthca_buf_alloc()
283 int is_direct, struct mthca_mr *mr) in mthca_buf_free() argument
287 if (mr) in mthca_buf_free()
288 mthca_free_mr(dev, mr); in mthca_buf_free()
Dmthca_dev.h426 int hca_write, struct mthca_mr *mr);
428 int is_direct, struct mthca_mr *mr);
472 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
474 u32 access, struct mthca_mr *mr);
478 u32 access, struct mthca_mr *mr);
479 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
Dmthca_srq.c104 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context()
129 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_arbel_init_srq_context()
143 srq->is_direct, &srq->mr); in mthca_free_srq_buf()
164 &srq->queue, &srq->is_direct, pd, 1, &srq->mr); in mthca_alloc_srq_buf()
Dmthca_eq.c523 &eq->mr); in mthca_create_eq()
543 eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); in mthca_create_eq()
565 mthca_free_mr(dev, &eq->mr); in mthca_create_eq()
618 mthca_free_mr(dev, &eq->mr); in mthca_free_eq()
Dmthca_cq.c361 &dev->driver_pd, 1, &buf->mr); in mthca_alloc_cq_buf()
374 buf->is_direct, &buf->mr); in mthca_free_cq_buf()
842 cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey); in mthca_init_cq()
/linux-4.4.14/net/netfilter/
Dxt_NETMAP.c72 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in netmap_tg4() local
81 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); in netmap_tg4()
88 new_ip |= mr->range[0].min_ip & netmask; in netmap_tg4()
92 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; in netmap_tg4()
95 newrange.min_proto = mr->range[0].min; in netmap_tg4()
96 newrange.max_proto = mr->range[0].max; in netmap_tg4()
104 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in netmap_tg4_check() local
106 if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) { in netmap_tg4_check()
110 if (mr->rangesize != 1) { in netmap_tg4_check()
111 pr_debug("bad rangesize %u.\n", mr->rangesize); in netmap_tg4_check()
Dxt_nat.c19 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in xt_nat_checkentry_v0() local
21 if (mr->rangesize != 1) { in xt_nat_checkentry_v0()
45 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in xt_snat_target_v0() local
55 xt_nat_convert_range(&range, &mr->range[0]); in xt_snat_target_v0()
62 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in xt_dnat_target_v0() local
71 xt_nat_convert_range(&range, &mr->range[0]); in xt_dnat_target_v0()
Dxt_REDIRECT.c49 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; in redirect_tg4_check() local
51 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) { in redirect_tg4_check()
55 if (mr->rangesize != 1) { in redirect_tg4_check()
56 pr_debug("bad rangesize %u.\n", mr->rangesize); in redirect_tg4_check()
Dnf_nat_redirect.c33 const struct nf_nat_ipv4_multi_range_compat *mr, in nf_nat_redirect_ipv4() argument
71 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; in nf_nat_redirect_ipv4()
74 newrange.min_proto = mr->range[0].min; in nf_nat_redirect_ipv4()
75 newrange.max_proto = mr->range[0].max; in nf_nat_redirect_ipv4()
/linux-4.4.14/drivers/watchdog/
Dat91sam9_wdt.c89 u32 mr; member
167 if ((tmp & mask) != (wdt->mr & mask)) { in at91_wdt_init()
169 wdt_write(wdt, AT91_WDT_MR, wdt->mr); in at91_wdt_init()
175 if (wdt->mr & AT91_WDT_WDDIS) in at91_wdt_init()
220 if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask)) in at91_wdt_init()
223 tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask); in at91_wdt_init()
296 wdt->mr = 0; in of_at91wdt_init()
299 wdt->mr |= AT91_WDT_WDFIEN; in of_at91wdt_init()
302 wdt->mr |= AT91_WDT_WDRSTEN; in of_at91wdt_init()
307 wdt->mr |= AT91_WDT_WDRPROC; in of_at91wdt_init()
[all …]
/linux-4.4.14/drivers/isdn/hisax/
Dhfc4s8s_l1.c195 } mr; member
319 l1->hw->mr.r_irq_fifo_blx[l1->st_num] |= in dch_l2l1()
399 l1->hw->mr.r_irq_fifo_blx[l1->st_num] |= in bch_l2l1()
418 l1->hw->mr.timer_usg_cnt++; in bch_l2l1()
419 l1->hw->mr. in bch_l2l1()
448 l1->hw->mr.r_ctrl0 |= in bch_l2l1()
451 l1->hw->mr.r_ctrl0); in bch_l2l1()
465 l1->hw->mr. in bch_l2l1()
471 l1->hw->mr.timer_usg_cnt++; in bch_l2l1()
495 l1->hw->mr.r_ctrl0 |= in bch_l2l1()
[all …]
/linux-4.4.14/drivers/infiniband/core/
Dverbs.c251 struct ib_mr *mr; in ib_alloc_pd() local
253 mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE); in ib_alloc_pd()
254 if (IS_ERR(mr)) { in ib_alloc_pd()
256 return (struct ib_pd *)mr; in ib_alloc_pd()
259 pd->local_mr = mr; in ib_alloc_pd()
1204 struct ib_mr *mr; in ib_get_dma_mr() local
1211 mr = pd->device->get_dma_mr(pd, mr_access_flags); in ib_get_dma_mr()
1213 if (!IS_ERR(mr)) { in ib_get_dma_mr()
1214 mr->device = pd->device; in ib_get_dma_mr()
1215 mr->pd = pd; in ib_get_dma_mr()
[all …]
Duverbs_cmd.c955 struct ib_mr *mr; in ib_uverbs_reg_mr() local
1000 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, in ib_uverbs_reg_mr()
1002 if (IS_ERR(mr)) { in ib_uverbs_reg_mr()
1003 ret = PTR_ERR(mr); in ib_uverbs_reg_mr()
1007 mr->device = pd->device; in ib_uverbs_reg_mr()
1008 mr->pd = pd; in ib_uverbs_reg_mr()
1009 mr->uobject = uobj; in ib_uverbs_reg_mr()
1011 atomic_set(&mr->usecnt, 0); in ib_uverbs_reg_mr()
1013 uobj->object = mr; in ib_uverbs_reg_mr()
1019 resp.lkey = mr->lkey; in ib_uverbs_reg_mr()
[all …]
/linux-4.4.14/arch/arm/mach-omap2/
Dsdram-qimonda-hyb18m512160af-6.h26 .mr = 0x00000032,
33 .mr = 0x00000032,
40 .mr = 0x00000022,
47 .mr = 0x00000022,
Dsdram-micron-mt46h32m32lf-6.h27 .mr = 0x00000032,
34 .mr = 0x00000032,
41 .mr = 0x00000032,
48 .mr = 0x00000032,
Dsdram-numonyx-m65kxxxxam.h23 .mr = 0x00000032,
30 .mr = 0x00000032,
37 .mr = 0x00000032,
44 .mr = 0x00000032,
Dsdram-hynix-h8mbx00u0mer-0em.h23 .mr = 0x00000032,
30 .mr = 0x00000032,
37 .mr = 0x00000022,
44 .mr = 0x00000022,
Dsdrc.h75 u32 mr; member
Dsdram-nokia.c277 nokia_sdrc_params[id].mr = 0x32; in sdrc_timings()
/linux-4.4.14/net/sunrpc/xprtrdma/
Dphysical_ops.c26 struct ib_mr *mr; in physical_op_open() local
30 mr = ib_get_dma_mr(ia->ri_pd, in physical_op_open()
34 if (IS_ERR(mr)) { in physical_op_open()
36 __func__, PTR_ERR(mr)); in physical_op_open()
40 ia->ri_dma_mr = mr; in physical_op_open()
Dfrwr_ops.c321 struct ib_mr *mr; in frwr_op_map() local
338 mr = frmr->fr_mr; in frwr_op_map()
370 n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, PAGE_SIZE); in frwr_op_map()
379 __func__, mw, frmr->sg_nents, mr->length); in frwr_op_map()
381 key = (u8)(mr->rkey & 0x000000FF); in frwr_op_map()
382 ib_update_fast_reg_key(mr, ++key); in frwr_op_map()
389 reg_wr.mr = mr; in frwr_op_map()
390 reg_wr.key = mr->rkey; in frwr_op_map()
402 seg1->mr_rkey = mr->rkey; in frwr_op_map()
403 seg1->mr_base = mr->iova; in frwr_op_map()
[all …]
Dsvc_rdma_recvfrom.c284 n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE); in rdma_read_chunk_frmr()
287 frmr->mr, n, frmr->sg_nents); in rdma_read_chunk_frmr()
292 key = (u8)(frmr->mr->lkey & 0x000000FF); in rdma_read_chunk_frmr()
293 ib_update_fast_reg_key(frmr->mr, ++key); in rdma_read_chunk_frmr()
295 ctxt->sge[0].addr = frmr->mr->iova; in rdma_read_chunk_frmr()
296 ctxt->sge[0].lkey = frmr->mr->lkey; in rdma_read_chunk_frmr()
297 ctxt->sge[0].length = frmr->mr->length; in rdma_read_chunk_frmr()
306 reg_wr.mr = frmr->mr; in rdma_read_chunk_frmr()
307 reg_wr.key = frmr->mr->lkey; in rdma_read_chunk_frmr()
321 read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; in rdma_read_chunk_frmr()
[all …]
Dsvc_rdma_transport.c792 struct ib_mr *mr; in rdma_alloc_frmr() local
802 mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg); in rdma_alloc_frmr()
803 if (IS_ERR(mr)) in rdma_alloc_frmr()
812 frmr->mr = mr; in rdma_alloc_frmr()
818 ib_dereg_mr(mr); in rdma_alloc_frmr()
834 ib_dereg_mr(frmr->mr); in rdma_dealloc_frmr_q()
/linux-4.4.14/arch/powerpc/mm/
Dhash_low_64.S161 mr r4,r30
162 mr r5,r7
200 mr r4,r29 /* Retrieve vpn */
225 mr r4,r29 /* Retrieve vpn */
242 mr r5,r28
283 mr r4,r3
287 mr r5,r28
298 mr r5,r29 /* vpn */
477 mr r4,r30
478 mr r5,r7
[all …]
Dtlb_nohash_low.S452 mr r7,r5
464 mr r9,r3
469 mr r3,r9
Dslb_low.S149 mr r11,r10
/linux-4.4.14/sound/soc/fsl/
Dfsl_dma.c396 u32 mr; in fsl_dma_open() local
482 mr = in_be32(&dma_channel->mr) & in fsl_dma_open()
500 mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN | in fsl_dma_open()
505 mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? in fsl_dma_open()
508 out_be32(&dma_channel->mr, mr); in fsl_dma_open()
567 u32 mr; /* DMA Mode Register */ in fsl_dma_hw_params() local
582 mr = in_be32(&dma_channel->mr) & ~(CCSR_DMA_MR_BWC_MASK | in fsl_dma_hw_params()
592 mr |= CCSR_DMA_MR_DAHTS_1 | CCSR_DMA_MR_SAHTS_1; in fsl_dma_hw_params()
596 mr |= CCSR_DMA_MR_DAHTS_2 | CCSR_DMA_MR_SAHTS_2; in fsl_dma_hw_params()
600 mr |= CCSR_DMA_MR_DAHTS_4 | CCSR_DMA_MR_SAHTS_4; in fsl_dma_hw_params()
[all …]
Dfsl_dma.h15 __be32 mr; /* Mode register */ member
/linux-4.4.14/drivers/staging/rdma/ehca/
Dehca_mrmw.c440 int ehca_rereg_phys_mr(struct ib_mr *mr, in ehca_rereg_phys_mr() argument
451 container_of(mr->device, struct ehca_shca, ib_device); in ehca_rereg_phys_mr()
452 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); in ehca_rereg_phys_mr()
465 ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not " in ehca_rereg_phys_mr()
473 ehca_err(mr->device, "rereg with bad pd, pd=%p " in ehca_rereg_phys_mr()
490 ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p " in ehca_rereg_phys_mr()
492 mr, shca->maxmr, mr->lkey); in ehca_rereg_phys_mr()
498 ehca_err(mr->device, "not supported for FMR, mr=%p " in ehca_rereg_phys_mr()
499 "flags=%x", mr, e_mr->flags); in ehca_rereg_phys_mr()
504 ehca_err(mr->device, "bad input values mr_rereg_mask=%x" in ehca_rereg_phys_mr()
[all …]
Dhcp_if.h198 const struct ehca_mr *mr,
207 const struct ehca_mr *mr,
215 const struct ehca_mr *mr,
220 const struct ehca_mr *mr);
224 const struct ehca_mr *mr,
234 const struct ehca_mr *mr,
Dhcp_if.c732 const struct ehca_mr *mr, in hipz_h_alloc_resource_mr() argument
758 const struct ehca_mr *mr, in hipz_h_register_rpage_mr() argument
784 adapter_handle.handle, mr, in hipz_h_register_rpage_mr()
785 mr->ipz_mr_handle.handle, pagesize, queue_type, in hipz_h_register_rpage_mr()
791 mr->ipz_mr_handle.handle, in hipz_h_register_rpage_mr()
797 const struct ehca_mr *mr, in hipz_h_query_mr() argument
805 mr->ipz_mr_handle.handle, /* r5 */ in hipz_h_query_mr()
817 const struct ehca_mr *mr) in hipz_h_free_resource_mr() argument
821 mr->ipz_mr_handle.handle, /* r5 */ in hipz_h_free_resource_mr()
826 const struct ehca_mr *mr, in hipz_h_reregister_pmr() argument
[all …]
Dehca_iverbs.h92 int ehca_rereg_phys_mr(struct ib_mr *mr,
98 int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
100 int ehca_dereg_mr(struct ib_mr *mr);
Dehca_mrmw.h125 void ehca_mr_deletenew(struct ehca_mr *mr);
/linux-4.4.14/drivers/infiniband/ulp/iser/
Diser_memory.c254 reg->rkey = device->mr->rkey; in iser_reg_dma()
417 iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) in iser_inv_rkey() argument
423 inv_wr->ex.invalidate_rkey = mr->rkey; in iser_inv_rkey()
427 rkey = ib_inc_rkey(mr->rkey); in iser_inv_rkey()
428 ib_update_fast_reg_key(mr, rkey); in iser_inv_rkey()
488 struct ib_mr *mr = rsc->mr; in iser_fast_reg_mr() local
493 iser_inv_rkey(iser_tx_next_wr(tx_desc), mr); in iser_fast_reg_mr()
495 n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K); in iser_fast_reg_mr()
507 wr->mr = mr; in iser_fast_reg_mr()
508 wr->key = mr->rkey; in iser_fast_reg_mr()
[all …]
Diser_verbs.c141 device->mr = ib_get_dma_mr(device->pd, access); in iser_create_device_ib_res()
142 if (IS_ERR(device->mr)) in iser_create_device_ib_res()
154 if (device->mr) in iser_create_device_ib_res()
155 ib_dereg_mr(device->mr); in iser_create_device_ib_res()
191 if (device->mr) in iser_free_device_ib_res()
192 (void)ib_dereg_mr(device->mr); in iser_free_device_ib_res()
198 device->mr = NULL; in iser_free_device_ib_res()
296 res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size); in iser_alloc_reg_res()
297 if (IS_ERR(res->mr)) { in iser_alloc_reg_res()
298 ret = PTR_ERR(res->mr); in iser_alloc_reg_res()
[all …]
Discsi_iser.h397 struct ib_mr *mr; member
420 struct ib_mr *mr; member
/linux-4.4.14/drivers/infiniband/hw/usnic/
Dusnic_ib_verbs.c621 struct usnic_ib_mr *mr; in usnic_ib_reg_mr() local
627 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in usnic_ib_reg_mr()
628 if (IS_ERR_OR_NULL(mr)) in usnic_ib_reg_mr()
629 return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM); in usnic_ib_reg_mr()
631 mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, in usnic_ib_reg_mr()
633 if (IS_ERR_OR_NULL(mr->umem)) { in usnic_ib_reg_mr()
634 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; in usnic_ib_reg_mr()
638 mr->ibmr.lkey = mr->ibmr.rkey = 0; in usnic_ib_reg_mr()
639 return &mr->ibmr; in usnic_ib_reg_mr()
642 kfree(mr); in usnic_ib_reg_mr()
[all …]
/linux-4.4.14/arch/powerpc/lib/
Dstring_64.S66 mr r3,r8
77 mr r3,r4
85 mr r8,r3
133 6: mr r8,r3
181 mr r8,r3
192 mr r8,r3
Dldstfp.S100 mr r3,r9
127 mr r3,r9
154 mr r3,r9
181 mr r3,r9
248 mr r3,r9
276 mr r3,r9
344 mr r3,r9
372 mr r3,r9
Ddiv64.S30 1: mr r11,r5 # here dividend.hi != 0
58 mr r3,r6 # return the remainder in r3
Dcopyuser_64.S72 mr r9,r7
73 mr r8,r6
325 mr r4,r3
326 mr r3,r5 /* return the number of bytes not copied */
Dmem_64.S23 mr r6,r3
Dstring.S152 90: mr r3,r4
Dmemcmp_64.S81 mr r3,rC
Dmemcpy_64.S67 mr r8,r9
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
Dehea_qmr.c815 struct ehea_mr *mr) in ehea_reg_mr_section() argument
830 hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, in ehea_reg_mr_section()
835 ehea_h_free_resource(adapter->handle, mr->handle, in ehea_reg_mr_section()
846 struct ehea_mr *mr) in ehea_reg_mr_sections() argument
855 hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); in ehea_reg_mr_sections()
864 struct ehea_mr *mr) in ehea_reg_mr_dir_sections() argument
873 hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); in ehea_reg_mr_dir_sections()
880 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) in ehea_reg_kernel_mr() argument
898 &mr->handle, &mr->lkey); in ehea_reg_kernel_mr()
907 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); in ehea_reg_kernel_mr()
[all …]
Dehea_qmr.h388 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);
393 int ehea_rem_mr(struct ehea_mr *mr);
Dehea_phyp.c449 struct ehea_mr *mr) in ehea_h_register_smr() argument
463 mr->handle = outs[0]; in ehea_h_register_smr()
464 mr->lkey = (u32)outs[2]; in ehea_h_register_smr()
Dehea.h398 struct ehea_mr mr; member
Dehea_main.c227 if (adapter->mr.handle) { in ehea_update_firmware_handles()
229 arr[i++].fwh = adapter->mr.handle; in ehea_update_firmware_handles()
1423 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1427 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
2345 ehea_rem_mr(&adapter->mr); in ehea_remove_adapter_mr()
2353 return ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_add_adapter_mr()
2782 ret = ehea_rem_mr(&adapter->mr); in ehea_rereg_mrs()
2794 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_rereg_mrs()
Dehea_phyp.h416 struct ehea_mr *mr);
/linux-4.4.14/arch/powerpc/kernel/vdso32/
Dgettimeofday.S40 mr r10,r3 /* r10 saves tv */
41 mr r11,r4 /* r11 saves tz */
43 mr r9, r3 /* datapage ptr in r9 */
82 mr r11,r4 /* r11 saves tp */
84 mr r9,r3 /* datapage ptr in r9 */
194 mr r11,r3 /* r11 holds t */
196 mr r9, r3 /* datapage ptr in r9 */
Dcacheflush.S31 mr r11,r3
34 mr r10,r3
Ddatapage.S58 mr r4,r3
/linux-4.4.14/drivers/tty/serial/
Dmsm_serial.c835 unsigned int mr; in msm_set_mctrl() local
837 mr = msm_read(port, UART_MR1); in msm_set_mctrl()
840 mr &= ~UART_MR1_RX_RDY_CTL; in msm_set_mctrl()
841 msm_write(port, mr, UART_MR1); in msm_set_mctrl()
844 mr |= UART_MR1_RX_RDY_CTL; in msm_set_mctrl()
845 msm_write(port, mr, UART_MR1); in msm_set_mctrl()
1035 unsigned int baud, mr; in msm_set_termios() local
1049 mr = msm_read(port, UART_MR2); in msm_set_termios()
1050 mr &= ~UART_MR2_PARITY_MODE; in msm_set_termios()
1053 mr |= UART_MR2_PARITY_MODE_ODD; in msm_set_termios()
[all …]
Datmel_serial.c2495 unsigned int mr, quot; in atmel_console_get_options() local
2505 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; in atmel_console_get_options()
2506 if (mr == ATMEL_US_CHRL_8) in atmel_console_get_options()
2511 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; in atmel_console_get_options()
2512 if (mr == ATMEL_US_PAR_EVEN) in atmel_console_get_options()
2514 else if (mr == ATMEL_US_PAR_ODD) in atmel_console_get_options()
/linux-4.4.14/drivers/video/fbdev/omap2/omapfb/
Domapfb-ioctl.c487 struct omapfb_memory_read *mr) in omapfb_memory_read() argument
496 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) in omapfb_memory_read()
499 if (mr->w * mr->h * 3 > mr->buffer_size) in omapfb_memory_read()
502 buf = vmalloc(mr->buffer_size); in omapfb_memory_read()
508 r = display->driver->memory_read(display, buf, mr->buffer_size, in omapfb_memory_read()
509 mr->x, mr->y, mr->w, mr->h); in omapfb_memory_read()
512 if (copy_to_user(mr->buffer, buf, mr->buffer_size)) in omapfb_memory_read()
/linux-4.4.14/drivers/scsi/qla2xxx/
Dqla_mr.c702 snprintf(str, size, "%s", ha->mr.fw_version); in qlafx00_fw_version_str()
1252 rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport, in qlafx00_configure_all_targets()
1357 ha->mr.fw_hbt_en = 0; in qlafx00_abort_isp_cleanup()
1470 &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO)) in qlafx00_rescan_isp()
1488 if (ha->mr.fw_hbt_cnt) in qlafx00_timer_routine()
1489 ha->mr.fw_hbt_cnt--; in qlafx00_timer_routine()
1494 (ha->mr.fw_hbt_en)) { in qlafx00_timer_routine()
1496 if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { in qlafx00_timer_routine()
1497 ha->mr.old_fw_hbt_cnt = fw_heart_beat; in qlafx00_timer_routine()
1498 ha->mr.fw_hbt_miss_cnt = 0; in qlafx00_timer_routine()
[all …]
Dqla_os.c2497 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; in qla2x00_probe_one()
2498 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; in qla2x00_probe_one()
2499 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; in qla2x00_probe_one()
2500 ha->mr.fw_hbt_en = 1; in qla2x00_probe_one()
2501 ha->mr.host_info_resend = false; in qla2x00_probe_one()
2502 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; in qla2x00_probe_one()
2592 ha->mr.fcport.vha = base_vha; in qla2x00_probe_one()
2593 ha->mr.fcport.port_type = FCT_UNKNOWN; in qla2x00_probe_one()
2594 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; in qla2x00_probe_one()
2595 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); in qla2x00_probe_one()
[all …]
/linux-4.4.14/arch/powerpc/kernel/vdso64/
Dgettimeofday.S31 mr r11,r3 /* r11 holds tv */
32 mr r10,r4 /* r10 holds tz */
71 mr r11,r4 /* r11 saves tp */
178 mr r11,r3 /* r11 holds t */
188 mr r3,r4
Dcacheflush.S31 mr r11,r3
34 mr r10,r3
Ddatapage.S58 mr r4,r3
/linux-4.4.14/arch/x86/include/asm/uv/
Duv_bau.h683 static inline void write_mmr_sw_ack(unsigned long mr) in write_mmr_sw_ack() argument
685 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); in write_mmr_sw_ack()
688 static inline void write_gmmr_sw_ack(int pnode, unsigned long mr) in write_gmmr_sw_ack() argument
690 write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); in write_gmmr_sw_ack()
703 static inline void write_mmr_data_config(int pnode, unsigned long mr) in write_mmr_data_config() argument
705 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr); in write_mmr_data_config()
/linux-4.4.14/drivers/staging/rdma/amso1100/
Dc2_mm.c177 struct c2_mr *mr) in c2_nsmr_register_phys_kern() argument
230 wr->pd_id = mr->pd->pd_id; in c2_nsmr_register_phys_kern()
280 mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); in c2_nsmr_register_phys_kern()
294 cpu_to_be32(mr->ibmr.lkey), in c2_nsmr_register_phys_kern()
Dc2_provider.c344 struct c2_mr *mr; in c2_reg_phys_mr() local
396 mr = kmalloc(sizeof(*mr), GFP_KERNEL); in c2_reg_phys_mr()
397 if (!mr) { in c2_reg_phys_mr()
402 mr->pd = to_c2pd(ib_pd); in c2_reg_phys_mr()
403 mr->umem = NULL; in c2_reg_phys_mr()
413 c2_convert_access(acc), mr); in c2_reg_phys_mr()
416 kfree(mr); in c2_reg_phys_mr()
420 return &mr->ibmr; in c2_reg_phys_mr()
504 struct c2_mr *mr = to_c2mr(ib_mr); in c2_dereg_mr() local
513 if (mr->umem) in c2_dereg_mr()
[all …]
/linux-4.4.14/arch/powerpc/crypto/
Daes-spe-modes.S91 mr rKS,rKP; \
231 mr rKP,rKS
267 mr rKP,rKS
306 mr rKP,rKS
368 mr rKP,rKS
393 mr rKP,rKS
430 mr rKP,rKS
460 mr rKP,rKS
513 mr rKP,rKT
525 mr rKP,rKS
[all …]
Daes-spe-keys.S93 mr r14,r8 /* apply LS_BOX to 4th temp */
139 mr r14,r10 /* apply LS_BOX to 6th temp */
193 mr r14,r12 /* apply LS_BOX to 8th temp */
201 mr r14,r8
Dsha1-powerpc-asm.S169 mr RE(0),r20
/linux-4.4.14/drivers/net/ethernet/ibm/emac/
Dtah.c55 out_be32(&p->mr, TAH_MR_SR); in tah_reset()
57 while ((in_be32(&p->mr) & TAH_MR_SR) && n) in tah_reset()
65 out_be32(&p->mr, in tah_reset()
Dtah.h29 u32 mr; member
/linux-4.4.14/include/rdma/
Dib_verbs.h1076 struct ib_mr *mr; member
1140 struct ib_mr *mr; member
1737 int (*rereg_user_mr)(struct ib_mr *mr,
1744 int (*query_mr)(struct ib_mr *mr,
1746 int (*dereg_mr)(struct ib_mr *mr);
1750 int (*map_mr_sg)(struct ib_mr *mr,
1753 int (*rereg_phys_mr)(struct ib_mr *mr,
1799 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2846 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2855 int ib_dereg_mr(struct ib_mr *mr);
[all …]
/linux-4.4.14/arch/x86/kernel/
Dtboot.c178 struct tboot_mac_region *mr; in add_mac_region() local
185 mr = &tboot->mac_regions[tboot->num_mac_regions++]; in add_mac_region()
186 mr->start = round_down(start, PAGE_SIZE); in add_mac_region()
187 mr->size = round_up(end, PAGE_SIZE) - mr->start; in add_mac_region()
/linux-4.4.14/arch/powerpc/kvm/
Dbook3s_hv_rmhandlers.S276 13: mr r3, r12
699 mr r31, r4
704 mr r4, r31
999 mr r10,r0
1001 mr r9, r4
1083 mr r9, r4
1177 mr r4, r9
1211 mr r4,r9
1241 mr r4, r9
1262 mr r4, r9
[all …]
Dmpic.c1289 static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr) in add_mmio_region() argument
1296 opp->mmio_regions[opp->num_mmio_regions++] = mr; in add_mmio_region()
1345 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_read_internal() local
1347 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) in kvm_mpic_read_internal()
1350 return mr->read(opp, addr - mr->start_addr, ptr); in kvm_mpic_read_internal()
1361 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_write_internal() local
1363 if (mr->start_addr > addr || addr >= mr->start_addr + mr->size) in kvm_mpic_write_internal()
1366 return mr->write(opp, addr - mr->start_addr, val); in kvm_mpic_write_internal()
Dbookehv_interrupts.S179 mr r11, r4
309 mr r4, r11
337 mr r4, r11
445 mr r5, r14 /* intno */
446 mr r14, r4 /* Save vcpu pointer. */
450 mr r4, r14
Dbook3s_segment.S25 mr reg, r13
Dbooke_interrupts.S253 mr r14, r4 /* Save vcpu pointer. */
258 mr r4, r14
/linux-4.4.14/arch/powerpc/net/
Dbpf_jit_asm.S112 mr r4, r_addr; \
164 mr r4, r_addr; \
176 mr r_addr, r3; \
/linux-4.4.14/arch/m68k/ifpsp060/src/
Dilsp.S546 mov.l %d0,%d2 # mr in d2
547 mov.l %d0,%d3 # mr in d3
549 swap %d3 # hi(mr) in lo d3
553 mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
554 mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
555 mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
556 mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
672 mov.l %d0,%d2 # mr in d2
673 mov.l %d0,%d3 # mr in d3
675 swap %d3 # hi(mr) in lo d3
[all …]
/linux-4.4.14/include/net/netfilter/
Dnf_nat_redirect.h6 const struct nf_nat_ipv4_multi_range_compat *mr,
/linux-4.4.14/drivers/spi/
Dspi-atmel.c369 u32 mr; in cs_activate() local
390 mr = spi_readl(as, MR); in cs_activate()
406 mr = spi_readl(as, MR); in cs_activate()
407 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); in cs_activate()
410 spi_writel(as, MR, mr); in cs_activate()
415 mr); in cs_activate()
422 u32 mr; in cs_deactivate() local
427 mr = spi_readl(as, MR); in cs_deactivate()
428 if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) { in cs_deactivate()
429 mr = SPI_BFINS(PCS, 0xf, mr); in cs_deactivate()
[all …]
/linux-4.4.14/drivers/ipack/devices/
Dscc2698.h26 u8 d0, mr; /* Mode register 1/2*/ member
33 u8 d0, mr; /* Mode register 1/2 */ member
Dipoctal.c335 &channel->regs->w.mr); /* mr1 */ in ipoctal_inst_slot()
336 iowrite8(0, &channel->regs->w.mr); /* mr2 */ in ipoctal_inst_slot()
606 iowrite8(mr1, &channel->regs->w.mr); in ipoctal_set_termios()
607 iowrite8(mr2, &channel->regs->w.mr); in ipoctal_set_termios()
/linux-4.4.14/tools/testing/selftests/powerpc/switch_endian/
Dcheck.S12 mr r9,r15
96 1: mr r3, r9
Dswitch_endian_test.S24 mr r3, r15
/linux-4.4.14/drivers/dma/
Dfsldma.h109 u32 mr; /* 0x00 - Mode Register */ member
143 u32 mr; member
Dfsldma.c66 DMA_OUT(chan, &chan->regs->mr, val, 32); in set_mr()
71 return DMA_IN(chan, &chan->regs->mr, 32); in get_mr()
1462 chan->regs_save.mr = get_mr(chan); in fsldma_suspend_late()
1493 mode = chan->regs_save.mr in fsldma_resume_early()
/linux-4.4.14/arch/s390/kernel/vdso32/
Dclock_gettime.S48 mr %r0,%r0
111 mr %r0,%r0
Dgettimeofday.S43 mr %r0,%r0
/linux-4.4.14/arch/powerpc/platforms/52xx/
Dlite5200_sleep.S44 mr r7, r3 /* save SRAM va */
45 mr r8, r4 /* save MBAR va */
76 mr r4, r7
/linux-4.4.14/arch/powerpc/boot/
Ddiv64.S30 1: mr r11,r5 # here dividend.hi != 0
58 mr r3,r6 # return the remainder in r3
Dps3-hvcall.S140 mr r4, r3
Dcrtsavres.S231 mr 1,11
/linux-4.4.14/drivers/infiniband/ulp/srp/
Dib_srp.c343 if (d->mr) in srp_destroy_fr_pool()
344 ib_dereg_mr(d->mr); in srp_destroy_fr_pool()
362 struct ib_mr *mr; in srp_create_fr_pool() local
378 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, in srp_create_fr_pool()
380 if (IS_ERR(mr)) { in srp_create_fr_pool()
381 ret = PTR_ERR(mr); in srp_create_fr_pool()
384 d->mr = mr; in srp_create_fr_pool()
1077 res = srp_inv_rkey(ch, (*pfr)->mr->rkey); in srp_unmap_data()
1081 (*pfr)->mr->rkey, res); in srp_unmap_data()
1345 rkey = ib_inc_rkey(desc->mr->rkey); in srp_map_finish_fr()
[all …]
Dib_srp.h244 struct ib_mr *mr; member
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c1315 struct ib_mr *mr; in kiblnd_find_rd_dma_mr() local
1327 for (i = 0, mr = prev_mr = NULL; in kiblnd_find_rd_dma_mr()
1329 mr = kiblnd_find_dma_mr(hdev, in kiblnd_find_rd_dma_mr()
1333 prev_mr = mr; in kiblnd_find_rd_dma_mr()
1335 if (mr == NULL || prev_mr != mr) { in kiblnd_find_rd_dma_mr()
1337 mr = NULL; in kiblnd_find_rd_dma_mr()
1342 return mr; in kiblnd_find_rd_dma_mr()
2149 struct ib_mr *mr; in kiblnd_hdev_setup_mrs() local
2166 mr = ib_get_dma_mr(hdev->ibh_pd, acflags); in kiblnd_hdev_setup_mrs()
2167 if (IS_ERR(mr)) { in kiblnd_hdev_setup_mrs()
[all …]
Do2iblnd_cb.c152 struct ib_mr *mr; in kiblnd_post_rx() local
161 mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); in kiblnd_post_rx()
162 LASSERT(mr != NULL); in kiblnd_post_rx()
164 rx->rx_sge.lkey = mr->lkey; in kiblnd_post_rx()
624 struct ib_mr *mr = NULL; in kiblnd_map_tx() local
645 mr = kiblnd_find_rd_dma_mr(hdev, rd); in kiblnd_map_tx()
646 if (mr != NULL) { in kiblnd_map_tx()
648 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; in kiblnd_map_tx()
1017 struct ib_mr *mr; in kiblnd_init_tx_msg() local
1025 mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); in kiblnd_init_tx_msg()
[all …]
/linux-4.4.14/drivers/media/usb/usbvision/
Dusbvision.h193 #define YUV_TO_RGB_BY_THE_BOOK(my, mu, mv, mr, mg, mb) { \ argument
204 mr = LIMIT_RGB(mm_r); \
/linux-4.4.14/include/linux/mlx5/
Ddriver.h725 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
729 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
730 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
732 int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
/linux-4.4.14/tools/testing/selftests/powerpc/copyloops/
Dcopyuser_64.S72 mr r9,r7
73 mr r8,r6
325 mr r4,r3
326 mr r3,r5 /* return the number of bytes not copied */
Dmemcpy_64.S67 mr r8,r9
/linux-4.4.14/include/linux/mlx4/
Ddevice.h682 struct mlx4_mr mr; member
1073 int npages, int page_shift, struct mlx4_mr *mr);
1074 int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
1075 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
1476 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr);
1477 int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
/linux-4.4.14/drivers/infiniband/ulp/isert/
Dib_isert.c2501 isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) in isert_inv_rkey() argument
2508 inv_wr->ex.invalidate_rkey = mr->rkey; in isert_inv_rkey()
2511 rkey = ib_inc_rkey(mr->rkey); in isert_inv_rkey()
2512 ib_update_fast_reg_key(mr, rkey); in isert_inv_rkey()
2524 struct ib_mr *mr; in isert_fast_reg_mr() local
2540 mr = fr_desc->data_mr; in isert_fast_reg_mr()
2543 mr = fr_desc->pi_ctx->prot_mr; in isert_fast_reg_mr()
2546 isert_inv_rkey(&inv_wr, mr); in isert_fast_reg_mr()
2550 n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE); in isert_fast_reg_mr()
2565 reg_wr.mr = mr; in isert_fast_reg_mr()
[all …]
/linux-4.4.14/include/linux/sunrpc/
Dsvc_rdma.h107 struct ib_mr *mr; member
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/
Dfb.h130 u32 mr[16]; member
/linux-4.4.14/arch/powerpc/platforms/powernv/
Dopal-wrappers.S114 mr r3,r0
150 mr r4,r3
/linux-4.4.14/drivers/message/fusion/
Dmptscsih.c588 mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) in mptscsih_io_done() argument
600 req_idx_MR = (mr != NULL) ? in mptscsih_io_done()
601 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; in mptscsih_io_done()
645 pScsiReply = (SCSIIOReply_t *) mr; in mptscsih_io_done()
650 ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag)); in mptscsih_io_done()
654 ioc->name, mf, mr, sc, req_idx)); in mptscsih_io_done()
2060 MPT_FRAME_HDR *mr) in mptscsih_taskmgmt_complete() argument
2063 "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr)); in mptscsih_taskmgmt_complete()
2067 if (!mr) in mptscsih_taskmgmt_complete()
2071 memcpy(ioc->taskmgmt_cmds.reply, mr, in mptscsih_taskmgmt_complete()
[all …]
Dmptbase.c446 MPT_FRAME_HDR *mr = NULL; in mpt_turbo_reply() local
478 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa); in mpt_turbo_reply()
482 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa); in mpt_turbo_reply()
497 if (MptCallbacks[cb_idx](ioc, mf, mr)) in mpt_turbo_reply()
507 MPT_FRAME_HDR *mr; in mpt_reply() local
526 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames + in mpt_reply()
529 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx); in mpt_reply()
530 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx; in mpt_reply()
534 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function)); in mpt_reply()
535 DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr); in mpt_reply()
[all …]
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
Dmem.c502 int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, in c4iw_reregister_phys_mem() argument
516 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); in c4iw_reregister_phys_mem()
519 if (atomic_read(&mr->usecnt)) in c4iw_reregister_phys_mem()
522 mhp = to_c4iw_mr(mr); in c4iw_reregister_phys_mem()
524 php = to_c4iw_pd(mr->pd); in c4iw_reregister_phys_mem()
/linux-4.4.14/arch/x86/platform/uv/
Dtlb_uv.c249 unsigned long mr; in bau_process_retry_msg() local
257 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; in bau_process_retry_msg()
258 write_mmr_sw_ack(mr); in bau_process_retry_msg()
383 unsigned long mr; in do_reset() local
393 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; in do_reset()
396 write_mmr_sw_ack(mr); in do_reset()
/linux-4.4.14/firmware/dsp56k/
Dbootstrap.asm59 and #<$fe,mr
/linux-4.4.14/drivers/infiniband/hw/nes/
Dnes_verbs.c259 ibmw_bind->bind_info.mr->lkey); in nes_bind_mw()
3405 struct nes_mr *mr = to_nesmr(reg_wr(ib_wr)->mr); in nes_post_send() local
3406 int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size); in nes_post_send()
3409 if (mr->npages > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) { in nes_post_send()
3417 mr->ibmr.iova); in nes_post_send()
3420 mr->ibmr.length); in nes_post_send()
3455 mr->paddr); in nes_post_send()
3459 mr->npages * 8); in nes_post_send()
3464 (unsigned long long) mr->ibmr.iova, in nes_post_send()
3465 mr->ibmr.length, in nes_post_send()
[all …]
/linux-4.4.14/tools/testing/selftests/powerpc/stringloops/
Dmemcmp_64.S81 mr r3,rC
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
Diwch_provider.c559 static int iwch_reregister_phys_mem(struct ib_mr *mr, in iwch_reregister_phys_mem() argument
576 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); in iwch_reregister_phys_mem()
579 if (atomic_read(&mr->usecnt)) in iwch_reregister_phys_mem()
582 mhp = to_iwch_mr(mr); in iwch_reregister_phys_mem()
584 php = to_iwch_pd(mr->pd); in iwch_reregister_phys_mem()
Diwch_qp.c152 struct iwch_mr *mhp = to_iwch_mr(wr->mr); in build_memreg()
166 V_FR_PAGE_SIZE(ilog2(wr->mr->page_size) - 12) | in build_memreg()
572 sgl.lkey = mw_bind->bind_info.mr->lkey; in iwch_bind_mw()
580 wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey); in iwch_bind_mw()
/linux-4.4.14/Documentation/cgroups/
Ddevices.txt23 echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow