This source file includes the following definitions.
- amdgpu_ttm_tt_get_user_pages
- amdgpu_ttm_tt_get_user_pages_done
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #ifndef __AMDGPU_TTM_H__
25 #define __AMDGPU_TTM_H__
26
27 #include "amdgpu.h"
28 #include <drm/gpu_scheduler.h>
29
/*
 * Private TTM placement pools for GDS, GWS and OA, layered on top of the
 * standard TTM_PL_* domains via TTM_PL_PRIV.
 */
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)

/* Placement flag bits matching the private pools above */
#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)

/*
 * Limits for the GTT transfer windows used during buffer moves.
 * NOTE(review): units of MAX_TRANSFER_SIZE (bytes vs. pages) are not visible
 * from this header — confirm against amdgpu_ttm.c before relying on it.
 */
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2

/* Recognizable poison pattern for debugging stale/freed memory */
#define AMDGPU_POISON 0xd0bed0be
42
/*
 * Per-device TTM memory management state.
 */
struct amdgpu_mman {
	/* Embedded TTM buffer-object device for this GPU */
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;
	/* Kernel mapping of an aperture base — presumably the CPU-visible
	 * VRAM aperture; TODO confirm against amdgpu_ttm_init() */
	void __iomem *aper_base_kaddr;

#if defined(CONFIG_DEBUG_FS)
	/* debugfs files created for this device (torn down on fini) */
	struct dentry *debugfs_entries[8];
#endif


	/* Hardware buffer-move functions and the ring they are submitted on;
	 * buffer_funcs_enabled gates their use (see
	 * amdgpu_ttm_set_buffer_funcs_status()) */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
	bool buffer_funcs_enabled;

	/* Serializes use of the GTT transfer windows
	 * (AMDGPU_GTT_NUM_TRANSFER_WINDOWS) */
	struct mutex gtt_window_lock;

	/* GPU scheduler entity — presumably used to submit buffer move/fill
	 * jobs; confirm against amdgpu_ttm.c */
	struct drm_sched_entity entity;
};
62
/*
 * Describes one side (source or destination) of a TTM memory copy,
 * as consumed by amdgpu_ttm_copy_mem_to_mem().
 */
struct amdgpu_copy_mem {
	/* buffer object involved in the copy */
	struct ttm_buffer_object *bo;
	/* its current memory placement */
	struct ttm_mem_reg *mem;
	/* offset into that placement — presumably in bytes; TODO confirm */
	unsigned long offset;
};
68
/* Memory-manager function tables for the GTT and VRAM TTM pools */
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;

/* GTT manager queries and post-reset recovery */
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);

/* VRAM manager queries (visible-VRAM size and usage accounting) */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);

/* TTM subsystem setup/teardown for a device */
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
/* Enable/disable use of the hardware buffer-move functions
 * (see amdgpu_mman.buffer_funcs_enabled) */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
					bool enable);

/*
 * Copy byte_count bytes between two GPU addresses; the resulting fence is
 * returned through *fence. Ownership/wait semantics of direct_submit and
 * vm_needs_flush are defined in amdgpu_ttm.c.
 */
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush);
/* Copy between two placements described by struct amdgpu_copy_mem */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct dma_resv *resv,
			       struct dma_fence **f);
/* Fill a buffer object with a 32-bit pattern */
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence);

/* mmap entry point and GART (GPU address) management for BOs */
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
105
/*
 * Userptr support: pin and validate the user pages backing a userptr BO.
 * When the kernel is built without CONFIG_DRM_AMDGPU_USERPTR the inline
 * stubs below make every attempt fail gracefully instead of linking in
 * the real implementation.
 */
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
#else
/* Userptr disabled at build time: report "operation not permitted". */
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
					       struct page **pages)
{
	return -EPERM;
}
/* No pages were ever pinned, so there is nothing valid to report. */
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	return false;
}
#endif
120
/* Bind an array of (already pinned) pages to a userptr TTM */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
/* Turn a TTM into a userptr mapping of the given user address */
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
/* Userptr state queries */
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
/* Does the userptr range of this TTM overlap [start, end)?
 * NOTE(review): end-exclusivity not visible here — confirm in amdgpu_ttm.c */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
/* Check (and update *last_invalidated) whether the user pages were
 * invalidated since the last call */
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
/* Compute page-directory / page-table entry flags for a placement */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
135
136 #endif