This source file includes the following definitions:
- amdgpu_gmc_vram_full_visible
- amdgpu_gmc_sign_extend
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26 #ifndef __AMDGPU_GMC_H__
27 #define __AMDGPU_GMC_H__
28
29 #include <linux/types.h>
30
31 #include "amdgpu_irq.h"
32
33
34 #define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
35 #define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL
36
37
38
39
40
41
42
43
44 #define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
45
46
47
48
49 #define AMDGPU_GMC_FAULT_RING_ORDER 8
50 #define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)
51
52
53
54
55 #define AMDGPU_GMC_FAULT_HASH_ORDER 8
56 #define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)
57
58
59
60
61 #define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
62
63 struct firmware;
64
65
66
67
68 struct amdgpu_gmc_fault {
69 uint64_t timestamp;
70 uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
71 uint64_t key:52;
72 };
73
74
75
76
77 struct amdgpu_vmhub {
78 uint32_t ctx0_ptb_addr_lo32;
79 uint32_t ctx0_ptb_addr_hi32;
80 uint32_t vm_inv_eng0_sem;
81 uint32_t vm_inv_eng0_req;
82 uint32_t vm_inv_eng0_ack;
83 uint32_t vm_context0_cntl;
84 uint32_t vm_l2_pro_fault_status;
85 uint32_t vm_l2_pro_fault_cntl;
86 };
87
88
89
90
91 struct amdgpu_gmc_funcs {
92
93 void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
94 uint32_t vmhub, uint32_t flush_type);
95
96 uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
97 uint64_t pd_addr);
98
99 void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
100 unsigned pasid);
101
102 void (*set_prt)(struct amdgpu_device *adev, bool enable);
103
104 uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
105 uint32_t flags);
106
107 void (*get_vm_pde)(struct amdgpu_device *adev, int level,
108 u64 *dst, u64 *flags);
109 };
110
111 struct amdgpu_xgmi {
112
113 u64 node_id;
114 u64 hive_id;
115
116 u64 node_segment_size;
117
118 unsigned physical_node_id;
119
120 unsigned num_physical_nodes;
121
122 struct list_head head;
123 bool supported;
124 };
125
126 struct amdgpu_gmc {
127 resource_size_t aper_size;
128 resource_size_t aper_base;
129
130
131 u64 mc_vram_size;
132 u64 visible_vram_size;
133 u64 agp_size;
134 u64 agp_start;
135 u64 agp_end;
136 u64 gart_size;
137 u64 gart_start;
138 u64 gart_end;
139 u64 vram_start;
140 u64 vram_end;
141
142
143
144
145
146
147 u64 fb_start;
148 u64 fb_end;
149 unsigned vram_width;
150 u64 real_vram_size;
151 int vram_mtrr;
152 u64 mc_mask;
153 const struct firmware *fw;
154 uint32_t fw_version;
155 struct amdgpu_irq_src vm_fault;
156 uint32_t vram_type;
157 uint32_t srbm_soft_reset;
158 bool prt_warning;
159 uint64_t stolen_size;
160 uint32_t sdpif_register;
161
162 u64 shared_aperture_start;
163 u64 shared_aperture_end;
164 u64 private_aperture_start;
165 u64 private_aperture_end;
166
167 spinlock_t invalidate_lock;
168 bool translate_further;
169 struct kfd_vm_fault_info *vm_fault_info;
170 atomic_t vm_fault_info_updated;
171
172 struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
173 struct {
174 uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
175 } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
176 uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
177
178 const struct amdgpu_gmc_funcs *gmc_funcs;
179
180 struct amdgpu_xgmi xgmi;
181 struct amdgpu_irq_src ecc_irq;
182 struct ras_common_if *umc_ras_if;
183 struct ras_common_if *mmhub_ras_if;
184 };
185
186 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
187 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
188 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
189 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
190 #define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
191
192
193
194
195
196
197
198
199
200 static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
201 {
202 WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
203
204 return (gmc->real_vram_size == gmc->visible_vram_size);
205 }
206
207
208
209
210
211
212 static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
213 {
214 if (addr >= AMDGPU_GMC_HOLE_START)
215 addr |= AMDGPU_GMC_HOLE_END;
216
217 return addr;
218 }
219
220 void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
221 uint64_t *addr, uint64_t *flags);
222 int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
223 uint32_t gpu_page_idx, uint64_t addr,
224 uint64_t flags);
225 uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
226 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
227 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
228 u64 base);
229 void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
230 struct amdgpu_gmc *mc);
231 void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
232 struct amdgpu_gmc *mc);
233 bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
234 uint16_t pasid, uint64_t timestamp);
235
236 #endif