This source file includes the following definitions:
- amdgpu_pasid_alloc
- amdgpu_pasid_free
- amdgpu_pasid_free_cb
- amdgpu_pasid_free_delayed
- amdgpu_vmid_had_gpu_reset
- amdgpu_vmid_grab_idle
- amdgpu_vmid_grab_reserved
- amdgpu_vmid_grab_used
- amdgpu_vmid_grab
- amdgpu_vmid_alloc_reserved
- amdgpu_vmid_free_reserved
- amdgpu_vmid_reset
- amdgpu_vmid_reset_all
- amdgpu_vmid_mgr_init
- amdgpu_vmid_mgr_fini
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
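
/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. They are allocated from
 * a single global IDA.
 */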
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free PASIDs from a dma_fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	unsigned int pasid;
};
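
/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available.
 */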
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
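
/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */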
void amdgpu_pasid_free(unsigned int pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

/* dma_fence callback that frees the PASID once the fence has signaled */
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}
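
/**
 * amdgpu_pasid_free_delayed - free PASID when fences signal
 * @resv: reservation object with the fences to wait for
 * @pasid: PASID to free
 *
 * Free the PASID only after all the fences in resv have signaled.
 */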
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       unsigned int pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout_rcu(resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
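
/*
 * VMID manager
 *
 * VMIDs are a per-VMHUB identifier used for GPU page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last time the VMID was used.
 */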
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
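
/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait for to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */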
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(adev, sync, &array->base, false);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}
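
/**
 * amdgpu_vmid_grab_reserved - try to assign the reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign the VM's reserved VMID for this hub.
 */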
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	bool needs_flush = vm->use_cpu_for_update;
	int r = 0;

	*id = vm->reserved_vmid[vmhub];
	if (updates && (*id)->flushed_updates &&
	    updates->context == (*id)->flushed_updates->context &&
	    !dma_fence_is_later(updates, (*id)->flushed_updates))
		updates = NULL;

	if ((*id)->owner != vm->entity.fence_context ||
	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
	    updates || !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* to prevent one context starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			r = amdgpu_sync_fence(adev, sync, tmp, false);
			return r;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
	if (r)
		return r;

	if (updates) {
		dma_fence_put((*id)->flushed_updates);
		(*id)->flushed_updates = dma_fence_get(updates);
	}
	job->vm_needs_flush = needs_flush;
	return 0;
}
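
/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */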
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;
		struct dma_fence *flushed;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->entity.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		flushed = (*id)->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 and
		 * are broken on Navi10 and Navi14.
		 */
		if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
				    adev->asic_type == CHIP_NAVI10 ||
				    adev->asic_type == CHIP_NAVI14))
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
		if (r)
			return r;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put((*id)->flushed_updates);
			(*id)->flushed_updates = dma_fence_get(updates);
		}

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}
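
/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */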
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			struct dma_fence *updates = sync->last_vm_update;

			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(ring->adev, &id->active,
					      fence, false);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->entity.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
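
/* Reserve one of the hub's VMIDs exclusively for this VM */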
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limitation of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}
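
/* Return a VM's reserved VMID (if any) to the hub's LRU list */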
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}
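
/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */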
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}
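
/**
 * amdgpu_vmid_reset_all - reset VMID to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset VMID to force flush on next use.
 */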
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}
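
/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */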
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}
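
/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */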
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}