This source file includes following definitions.
- rvt_mmap_init
- rvt_release_mmap_info
- rvt_vma_open
- rvt_vma_close
- rvt_mmap
- rvt_create_mmap_info
- rvt_update_mmap_info
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48 #include <linux/slab.h>
49 #include <linux/vmalloc.h>
50 #include <linux/mm.h>
51 #include <asm/pgtable.h>
52 #include <rdma/uverbs_ioctl.h>
53 #include "mmap.h"
54
55
56
57
58
59 void rvt_mmap_init(struct rvt_dev_info *rdi)
60 {
61 INIT_LIST_HEAD(&rdi->pending_mmaps);
62 spin_lock_init(&rdi->pending_lock);
63 rdi->mmap_offset = PAGE_SIZE;
64 spin_lock_init(&rdi->mmap_offset_lock);
65 }
66
67
68
69
70
71 void rvt_release_mmap_info(struct kref *ref)
72 {
73 struct rvt_mmap_info *ip =
74 container_of(ref, struct rvt_mmap_info, ref);
75 struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
76
77 spin_lock_irq(&rdi->pending_lock);
78 list_del(&ip->pending_mmaps);
79 spin_unlock_irq(&rdi->pending_lock);
80
81 vfree(ip->obj);
82 kfree(ip);
83 }
84
85 static void rvt_vma_open(struct vm_area_struct *vma)
86 {
87 struct rvt_mmap_info *ip = vma->vm_private_data;
88
89 kref_get(&ip->ref);
90 }
91
92 static void rvt_vma_close(struct vm_area_struct *vma)
93 {
94 struct rvt_mmap_info *ip = vma->vm_private_data;
95
96 kref_put(&ip->ref, rvt_release_mmap_info);
97 }
98
/* VMA callbacks so reference counts track VMA duplication and unmap. */
static const struct vm_operations_struct rvt_vm_ops = {
	.open = rvt_vma_open,
	.close = rvt_vma_close,
};
103
104
105
106
107
108
109
110
/**
 * rvt_mmap - user process is mmap()ing a pending object
 * @context: the user context the mapping belongs to
 * @vma: the VMA to populate
 *
 * Finds the pending rvt_mmap_info whose cookie matches the requested
 * page offset for this context and remaps its vmalloc'ed buffer into
 * the VMA.
 *
 * Return: 0 on success, otherwise a negative errno (-EINVAL when no
 * matching pending entry exists or the requested size is too large).
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Walk the pending list under the lock; entries are keyed by
	 * (context, offset).  _safe iteration because a matching entry
	 * is unlinked mid-walk.
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only match objects owned by the calling context. */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;

		/* Reject a mmap request larger than the object. */
		if (size > ip->size)
			break;

		/*
		 * Claim the entry (list_del_init so the later list_del in
		 * rvt_release_mmap_info stays safe) and drop the lock
		 * before remapping.
		 */
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		rvt_vma_open(vma);	/* take a reference for this VMA */
		goto done;
	}
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
149
150
151
152
153
154
155
156
157
158
159 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
160 struct ib_udata *udata, void *obj)
161 {
162 struct rvt_mmap_info *ip;
163
164 if (!udata)
165 return ERR_PTR(-EINVAL);
166
167 ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
168 if (!ip)
169 return ERR_PTR(-ENOMEM);
170
171 size = PAGE_ALIGN(size);
172
173 spin_lock_irq(&rdi->mmap_offset_lock);
174 if (rdi->mmap_offset == 0)
175 rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
176 ip->offset = rdi->mmap_offset;
177 rdi->mmap_offset += ALIGN(size, SHMLBA);
178 spin_unlock_irq(&rdi->mmap_offset_lock);
179
180 INIT_LIST_HEAD(&ip->pending_mmaps);
181 ip->size = size;
182 ip->context =
183 container_of(udata, struct uverbs_attr_bundle, driver_udata)
184 ->context;
185 ip->obj = obj;
186 kref_init(&ip->ref);
187
188 return ip;
189 }
190
191
192
193
194
195
196
197
198 void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
199 u32 size, void *obj)
200 {
201 size = PAGE_ALIGN(size);
202
203 spin_lock_irq(&rdi->mmap_offset_lock);
204 if (rdi->mmap_offset == 0)
205 rdi->mmap_offset = PAGE_SIZE;
206 ip->offset = rdi->mmap_offset;
207 rdi->mmap_offset += size;
208 spin_unlock_irq(&rdi->mmap_offset_lock);
209
210 ip->size = size;
211 ip->obj = obj;
212 }