/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;
	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			   adev->mc.visible_vram_size ?
			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			   mem->size;
	}
	return ret;
}

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	kfree(bo->metadata);
	kfree(bo);
}

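/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a buffer object is an amdgpu BO
 *
 * @bo: TTM buffer object in question
 *
 * Uses the destroy callback to determine whether the TTM buffer object
 * is embedded in a struct amdgpu_bo and can safely be converted with
 * container_of().
 */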
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			(placements[i].flags & TTM_PL_FLAG_VRAM) &&
			!placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

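/**
 * amdgpu_ttm_placement_from_domain - set a buffer's placement
 *
 * @rbo: buffer object in question
 * @domain: requested domain(s) (AMDGPU_GEM_DOMAIN_*)
 *
 * Fills in the TTM placement of the buffer object for the given domain,
 * taking the buffer's creation flags into account.
 */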
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

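/**
 * amdgpu_bo_create_restricted - create a buffer object with a given placement
 *
 * @adev: amdgpu device pointer
 * @size: requested size in bytes, rounded up to the page size
 * @byte_align: requested alignment in bytes
 * @kernel: true for kernel allocations (waits are uninterruptible)
 * @domain: allowed memory domains (AMDGPU_GEM_DOMAIN_*)
 * @flags: creation flags (AMDGPU_GEM_CREATE_*)
 * @sg: optional scatter/gather table for imported buffers
 * @placement: TTM placement to use for the buffer
 * @resv: optional reservation object to reuse
 * @bo_ptr: used to return the created buffer object
 *
 * Returns 0 on success, negative error code otherwise.
 */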
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT |
				       AMDGPU_GEM_DOMAIN_CPU |
				       AMDGPU_GEM_DOMAIN_GDS |
				       AMDGPU_GEM_DOMAIN_GWS |
				       AMDGPU_GEM_DOMAIN_OA);

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

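/**
 * amdgpu_bo_create - create a buffer object
 *
 * @adev: amdgpu device pointer
 * @size: requested size in bytes
 * @byte_align: requested alignment in bytes
 * @kernel: true for kernel allocations
 * @domain: allowed memory domains (AMDGPU_GEM_DOMAIN_*)
 * @flags: creation flags (AMDGPU_GEM_CREATE_*)
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to reuse
 * @bo_ptr: used to return the created buffer object
 *
 * Builds a placement from the domain and flags and calls
 * amdgpu_bo_create_restricted(). Returns 0 on success, negative error
 * code otherwise.
 */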
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

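/**
 * amdgpu_bo_kmap - map a buffer object into kernel address space
 *
 * @bo: buffer object to map
 * @ptr: optional location to return the kernel virtual address
 *
 * Reuses an existing kernel mapping if one is present. Returns 0 on
 * success, -EPERM for buffers created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 * or a negative error code from ttm_bo_kmap().
 */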
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

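/**
 * amdgpu_bo_unref - release a reference to a buffer object
 *
 * @bo: pointer to the buffer object pointer
 *
 * Counterpart to amdgpu_bo_ref().
 */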
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

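/**
 * amdgpu_bo_pin_restricted - pin a buffer object within an offset range
 *
 * @bo: buffer object to pin
 * @domain: domain to pin the buffer into (AMDGPU_GEM_DOMAIN_*)
 * @min_offset: minimum offset of the buffer inside the domain
 * @max_offset: maximum offset of the buffer inside the domain, 0 for any
 * @gpu_addr: optional location to return the GPU address of the buffer
 *
 * Pins the buffer so it cannot be evicted; nested pins only increase the
 * pin count. Returns 0 on success, -EPERM for userptr buffers, -EINVAL
 * for bad offsets, or a negative error code from ttm_bo_validate().
 */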
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;
			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

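/**
 * amdgpu_bo_unpin - unpin a buffer object
 *
 * @bo: buffer object to unpin
 *
 * Decreases the pin count; once it reaches zero the NO_EVICT flag is
 * cleared and the buffer is revalidated so it can be evicted again.
 * Returns 0 on success or a negative error code from ttm_bo_validate().
 */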
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

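/**
 * amdgpu_bo_evict_vram - evict all buffers from VRAM
 *
 * @adev: amdgpu device pointer
 *
 * Returns 0 on success or a negative error code from ttm_bo_evict_mm().
 */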
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo, *n;

	if (list_empty(&adev->gem.objects))
		return;

	dev_err(adev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
		dev_err(adev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->adev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->adev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		adev->mc.mc_vram_size >> 20,
		(unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n",
			adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

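/**
 * amdgpu_bo_set_tiling_flags - set tiling flags for a buffer object
 *
 * @bo: buffer object in question
 * @tiling_flags: new tiling flags (AMDGPU_TILING_* encoding)
 *
 * Returns 0 on success, -EINVAL if the TILE_SPLIT value is out of range.
 */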
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

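/**
 * amdgpu_bo_set_metadata - attach opaque metadata to a buffer object
 *
 * @bo: buffer object in question
 * @metadata: metadata to copy, may be NULL if @metadata_size is 0
 * @metadata_size: size of the metadata in bytes, 0 clears existing metadata
 * @flags: metadata flags to store alongside the data
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM if the copy
 * could not be allocated.
 */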
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

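/**
 * amdgpu_bo_move_notify - notification about a buffer object move
 *
 * @bo: buffer object that is being moved
 * @new_mem: new placement of the buffer, or NULL
 *
 * Invalidates the VM mappings of the buffer and updates the GTT/VRAM
 * usage statistics. Called before the move actually happens.
 */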
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

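/**
 * amdgpu_bo_fault_reserve_notify - notification about a CPU fault
 *
 * @bo: buffer object that is being faulted in
 *
 * If the buffer is in VRAM but outside the CPU-visible aperture, move it
 * into visible VRAM, falling back to GTT if that fails with -ENOMEM.
 * Returns 0 on success or a negative error code.
 */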
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}