This source file includes the following definitions:
- scif_insert_tcw
- scif_insert_window
- scif_query_tcw
- scif_query_window
- scif_rma_list_unregister
- scif_unmap_all_windows
- scif_unregister_all_windows
#include "scif_main.h"
#include <linux/mmu_notifier.h>
#include <linux/highmem.h>
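
/*
 * scif_insert_tcw:
 *
 * Insert a temp window into the temp registration list sorted by
 * va_for_temp. The tail is checked first so that in-order inserts stay
 * O(1); otherwise the list is walked to find the insertion point.
 * Since no lock is taken here, the caller is assumed to hold the
 * endpoint's RMA lock, as the locked helpers later in this file suggest.
 */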
void scif_insert_tcw(struct scif_window *window, struct list_head *head)
{
	struct scif_window *curr = NULL;
	struct scif_window *prev = list_entry(head, struct scif_window, list);
	struct list_head *item;

	INIT_LIST_HEAD(&window->list);
	/* Compare with the tail and, if it sorts before us, add at the end */
	if (!list_empty(head)) {
		curr = list_entry(head->prev, struct scif_window, list);
		if (curr->va_for_temp < window->va_for_temp) {
			list_add_tail(&window->list, head);
			return;
		}
	}
	list_for_each(item, head) {
		curr = list_entry(item, struct scif_window, list);
		if (curr->va_for_temp > window->va_for_temp)
			break;
		prev = curr;
	}
	list_add(&window->list, &prev->list);
}
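
/*
 * scif_insert_window:
 *
 * Insert a window into the self registration list sorted by offset,
 * then take one reference per page via scif_set_window_ref(). The
 * caller is assumed to hold the endpoint's RMA lock.
 */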
void scif_insert_window(struct scif_window *window, struct list_head *head)
{
	struct scif_window *curr = NULL, *prev = NULL;
	struct list_head *item;

	INIT_LIST_HEAD(&window->list);
	list_for_each(item, head) {
		curr = list_entry(item, struct scif_window, list);
		if (curr->offset > window->offset)
			break;
		prev = curr;
	}
	if (!prev)
		list_add(&window->list, head);
	else
		list_add(&window->list, &prev->list);
	scif_set_window_ref(window, window->nr_pages);
}
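
/*
 * scif_query_tcw:
 *
 * Query the temp cached registration list of @ep for a window spanning
 * the requested VA range with compatible protections. On an exact hit
 * the window is returned through req->out_window. On a partial overlap
 * with matching protections the request is widened to also cover the
 * old window, which is then destroyed so the caller can register one
 * window spanning the whole range; -ENXIO signals that a new window is
 * needed. The caller is assumed to hold the endpoint's RMA lock.
 */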
int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
{
	struct list_head *item, *temp, *head = req->head;
	struct scif_window *window;
	u64 start_va_window, start_va_req = req->va_for_temp;
	u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;

	if (!req->nr_bytes)
		return -EINVAL;
	/*
	 * Avoid traversing the entire list to find out that there
	 * is no entry that matches
	 */
	if (!list_empty(head)) {
		window = list_last_entry(head, struct scif_window, list);
		end_va_window = window->va_for_temp +
			(window->nr_pages << PAGE_SHIFT);
		if (start_va_req > end_va_window)
			return -ENXIO;
	}
	list_for_each_safe(item, temp, head) {
		window = list_entry(item, struct scif_window, list);
		start_va_window = window->va_for_temp;
		end_va_window = window->va_for_temp +
			(window->nr_pages << PAGE_SHIFT);
		if (start_va_req < start_va_window &&
		    end_va_req < start_va_window)
			break;
		if (start_va_req >= end_va_window)
			continue;
		if ((window->prot & req->prot) == req->prot) {
			if (start_va_req >= start_va_window &&
			    end_va_req <= end_va_window) {
				*req->out_window = window;
				return 0;
			}
			/* expand window */
			if (start_va_req < start_va_window) {
				req->nr_bytes +=
					start_va_window - start_va_req;
				req->va_for_temp = start_va_window;
			}
			if (end_va_req >= end_va_window)
				req->nr_bytes += end_va_window - end_va_req;
		}
		/*
		 * Destroy the old window to create a new one that
		 * spans the requested range
		 */
		__scif_rma_destroy_tcw_helper(window);
		break;
	}
	return -ENXIO;
}
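
/*
 * scif_query_window:
 *
 * Query the registration list for a valid, contiguous range of windows
 * covering req->offset .. req->offset + req->nr_bytes with the
 * requested protections. The first matching window is returned through
 * req->out_window. The caller is assumed to hold the endpoint's RMA
 * lock.
 */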
int scif_query_window(struct scif_rma_req *req)
{
	struct list_head *item;
	struct scif_window *window;
	s64 end_offset, offset = req->offset;
	u64 tmp_min, nr_bytes_left = req->nr_bytes;

	if (!req->nr_bytes)
		return -EINVAL;

	list_for_each(item, req->head) {
		window = list_entry(item, struct scif_window, list);
		end_offset = window->offset +
			(window->nr_pages << PAGE_SHIFT);
		if (offset < window->offset)
			/* Offset not found! */
			return -ENXIO;
		if (offset >= end_offset)
			continue;
		/* Check read/write protections. */
		if ((window->prot & req->prot) != req->prot)
			return -EPERM;
		if (nr_bytes_left == req->nr_bytes)
			/* Store the first window */
			*req->out_window = window;
		tmp_min = min((u64)end_offset - offset, nr_bytes_left);
		nr_bytes_left -= tmp_min;
		offset += tmp_min;
		/*
		 * Range requested encompasses
		 * multiple windows contiguously.
		 */
		if (!nr_bytes_left) {
			/* Done for partial window */
			if (req->type == SCIF_WINDOW_PARTIAL ||
			    req->type == SCIF_WINDOW_SINGLE)
				return 0;
			/* Extra logic for full windows */
			if (offset == end_offset)
				/* Spanning multiple whole windows */
				return 0;
			/* Not spanning multiple whole windows */
			return -ENXIO;
		}
		if (req->type == SCIF_WINDOW_SINGLE)
			break;
	}
	dev_err(scif_info.mdev.this_device,
		"%s %d ENXIO\n", __func__, __LINE__);
	return -ENXIO;
}
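
/*
 * scif_rma_list_unregister:
 *
 * Traverse the self registration list starting from @window and call
 * scif_unregister_window() on each window until @nr_pages pages have
 * been covered, stopping early on error. The caller is assumed to hold
 * the endpoint's RMA lock.
 */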
int scif_rma_list_unregister(struct scif_window *window,
			     s64 offset, int nr_pages)
{
	struct scif_endpt *ep = (struct scif_endpt *)window->ep;
	struct list_head *head = &ep->rma_info.reg_list;
	s64 end_offset;
	int err = 0;
	int loop_nr_pages;
	struct scif_window *_window;

	list_for_each_entry_safe_from(window, _window, head, list) {
		end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
		loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
				    nr_pages);
		err = scif_unregister_window(window);
		if (err)
			return err;
		nr_pages -= loop_nr_pages;
		offset += (loop_nr_pages << PAGE_SHIFT);
		if (!nr_pages)
			break;
	}
	return 0;
}
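
/*
 * scif_unmap_all_windows:
 *
 * Traverse all the windows in the self registration list of @epd and
 * unmap each one via scif_unmap_window(). Takes the endpoint's RMA
 * lock internally, so callers must not already hold it.
 */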
void scif_unmap_all_windows(scif_epd_t epd)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct list_head *head = &ep->rma_info.reg_list;

	mutex_lock(&ep->rma_info.rma_lock);
	list_for_each_safe(item, tmp, head) {
		window = list_entry(item, struct scif_window, list);
		scif_unmap_window(ep->remote_dev, window);
	}
	mutex_unlock(&ep->rma_info.rma_lock);
}
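
/*
 * scif_unregister_all_windows:
 *
 * Traverse all the windows in the self registration list of @epd and
 * call scif_unregister_window() on each, restarting the walk whenever
 * an asynchronous list deletion is flagged. Endpoints with pending MMU
 * notifier registrations are queued for deferred cleanup on the way
 * out. Takes the endpoint's RMA lock internally.
 */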
int scif_unregister_all_windows(scif_epd_t epd)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct list_head *head = &ep->rma_info.reg_list;
	int err = 0;

	mutex_lock(&ep->rma_info.rma_lock);
retry:
	item = NULL;
	tmp = NULL;
	list_for_each_safe(item, tmp, head) {
		window = list_entry(item, struct scif_window, list);
		ep->rma_info.async_list_del = 0;
		err = scif_unregister_window(window);
		if (err)
			dev_err(scif_info.mdev.this_device,
				"%s %d err %d\n",
				__func__, __LINE__, err);
		/*
		 * Need to restart the list traversal if an entry was
		 * deleted asynchronously while the window was being
		 * unregistered.
		 */
		if (READ_ONCE(ep->rma_info.async_list_del))
			goto retry;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	if (!list_empty(&ep->rma_info.mmn_list)) {
		spin_lock(&scif_info.rmalock);
		list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
		spin_unlock(&scif_info.rmalock);
		schedule_work(&scif_info.mmu_notif_work);
	}
	return err;
}