This source file includes the following definitions:
- rds_ib_alloc_fmr
- rds_ib_map_fmr
- rds_ib_reg_fmr
- rds_ib_unreg_fmr
- rds_ib_free_fmr_list
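
For context, here is a minimal sketch of how a caller might drive this API. It assumes an already set-up struct rds_ib_device and a DMA-capable scatterlist; rds_ib_example_reg() is a hypothetical helper written purely for this illustration, while rds_ib_reg_fmr() below and rds_ib_free_mr() (defined elsewhere in the RDS IB code) are the real entry points:

/* Hypothetical wrapper, for illustration only: register a scatterlist
 * through the FMR pools and hand the resulting rkey to the caller.
 */
static int rds_ib_example_reg(struct rds_ib_device *rds_ibdev,
			      struct scatterlist *sg, unsigned long nents,
			      u32 *rkey, struct rds_ib_mr **ibmr_out)
{
	struct rds_ib_mr *ibmr;

	ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, rkey);
	if (IS_ERR(ibmr))
		return PTR_ERR(ibmr);

	/* *rkey can now be handed to the remote peer; once the RDMA
	 * traffic is done, return the MR with rds_ib_free_mr(ibmr, 0),
	 * which recycles it through the pool rather than destroying it.
	 */
	*ibmr_out = ibmr;
	return 0;
}
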
#include "ib_mr.h"

struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	/* Kick off a background flush once a tenth of the pool is dirty. */
	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* Switch pools if the preferred pool is reaching its upper limit. */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_ATOMIC),
				&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	kfree(ibmr);
	/* Drop the item_count reference taken in rds_ib_try_reuse_ibmr(). */
	atomic_dec(&pool->item_count);

	return ERR_PTR(err);
}

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
			  struct rds_ib_mr *ibmr, struct scatterlist *sg,
			  unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	/* FMRs map whole pages: only the first entry may start at a
	 * non-page boundary and only the last entry may end at one.
	 */
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&scat[i]);
		u64 dma_addr = sg_dma_address(&scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc_array_node(page_cnt, sizeof(u64), GFP_ATOMIC,
				       rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	/* Build the flat page-address array that ib_map_phys_fmr() expects. */
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&scat[i]);
		u64 dma_addr = sg_dma_address(&scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0) {
		*key = fmr->fmr->rkey;
		return ibmr;
	}

	/* Don't hand back a pointer to an MR we just returned to the
	 * pool; report the mapping failure to the caller instead.
	 */
	rds_ib_free_mr(ibmr, 0);
	return ERR_PTR(ret);
}

void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		/* Destroy the MR while we are still short of the free goal,
		 * or when it has exhausted its allowed remappings.
		 */
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	/* MRs that have hit their remap limit must be destroyed on the
	 * next flush; everything else can be recycled.
	 */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}
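
As a worked example of the two dirty_count thresholds in rds_ib_alloc_fmr(), which act as two-stage backpressure: assuming, purely for illustration, a pool sized at max_items = 2048 (the real value is configured elsewhere in the RDS IB code), the delayed flush worker is queued once dirty_count reaches 2048 / 10 = 204 dirty MRs, and new allocations spill over into the other pool once it reaches 2048 * 9 / 10 = 1843. Both expressions use C integer division, so the thresholds round down.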