This source file includes the following definitions:
- is_virtual_machine
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H

#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is SR-IOV ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* SR-IOV is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */

struct amdgpu_mm_table {
	struct amdgpu_bo *bo;
	uint32_t *cpu_addr;
	uint64_t gpu_addr;
};

#define AMDGPU_VF_ERROR_ENTRY_SIZE    16

/* ring buffer of error records collected by the VF driver */
struct amdgpu_vf_error_buffer {
	struct mutex lock;
	int read_count;
	int write_count;
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
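
/*
 * The code/flags/data arrays above form a small ring buffer of error
 * records: write_count indexes the next free slot (modulo
 * AMDGPU_VF_ERROR_ENTRY_SIZE) and read_count trails it when the records
 * are flushed to the host. A minimal, hypothetical producer (illustrative
 * sketch only, not the driver's actual helper) looks like:
 *
 *	mutex_lock(&adev->virt.vf_errors.lock);
 *	idx = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
 *	adev->virt.vf_errors.code[idx]  = error_code;
 *	adev->virt.vf_errors.flags[idx] = error_flags;
 *	adev->virt.vf_errors.data[idx]  = error_data;
 *	adev->virt.vf_errors.write_count++;
 *	mutex_unlock(&adev->virt.vf_errors.lock);
 */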

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*reset_gpu)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
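
/*
 * Usage sketch (illustrative only): the amdgpu_virt_request_full_gpu() /
 * amdgpu_virt_release_full_gpu() wrappers declared at the end of this file
 * are expected to dispatch through these callbacks roughly like this; the
 * real implementation lives in amdgpu_virt.c:
 *
 *	int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
 *	{
 *		struct amdgpu_virt *virt = &adev->virt;
 *
 *		if (virt->ops && virt->ops->req_full_gpu)
 *			return virt->ops->req_full_gpu(adev, init);
 *		return 0;
 *	}
 */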

/*
 * Firmware reserve frame buffer: pointers into the VRAM area the host
 * reserves for PF<->VF data exchange.
 */
struct amdgpu_virt_fw_reserve {
	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
	unsigned int checksum_key;
};

/*
 * Layout of the data exchanged between the PF (host GIM) and the VF;
 * private data, not part of the amdgpu TTM/ucode buffers.
 */

#define AMDGIM_DATAEXCHANGE_OFFSET		(64 * 1024)

/*
 * Number of uint32_t reserved slots left in a structure of 'total' dwords
 * once 'u8' bytes, 'u16' halfwords, 'u32' dwords and 'u64' qwords of payload
 * (the header is counted by the caller in the u32 argument) are accounted for.
 */
#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
		(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
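
/*
 * Worked example (derived from the definitions below, not from original
 * comments): amdgim_pf2vf_info_v2 passes total = 256 dwords, 0 bytes,
 * 0 halfwords, 9 + 4 = 13 dwords (9 payload dwords plus the 4-dword header)
 * and 3 qwords, so
 *
 *	reserved entries = 256 - ((0+3)/4 + (0+1)/2 + 13 + 3*2)
 *	                 = 256 - (0 + 0 + 13 + 6) = 237
 *
 * i.e. reserved[] has 237 entries, budgeting the structure at 256 dwords (1 KiB).
 */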

enum AMDGIM_FEATURE_FLAG {
	/* GIM supports collecting the VF error log */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
	/* GIM loads the ucodes on behalf of the VF */
	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
	/* VRAM is lost across a GIM-initiated FLR */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
	/* HW performance simulation mode in GIM */
	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};

struct amd_sriov_msg_pf2vf_info_header {
	/* the total structure size in bytes */
	uint32_t size;
	/* version of this structure, written by the host (GIM) */
	uint32_t version;
	/* reserved */
	uint32_t reserved[2];
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* max UVD encode resolution: max_width * max_height */
	unsigned int uvd_enc_max_pixels_count;
	/* max UVD encode bandwidth, in 16x16 pixel blocks per second */
	unsigned int uvd_enc_max_bandwidth;
	/* max VCE encode resolution: max_width * max_height */
	unsigned int vce_enc_max_pixels_count;
	/* max VCE encode bandwidth, in 16x16 pixel blocks per second */
	unsigned int vce_enc_max_bandwidth;
	/* MEC firmware offset, in KB, from the start of the VF-visible frame buffer */
	unsigned int mecfw_kboffset;
	/* feature flags supported by the GIM driver (AMDGIM_FEATURE_*) */
	unsigned int feature_flags;
	/* structure checksum (see checksum_key in struct amdgpu_virt_fw_reserve) */
	unsigned int checksum;
} __aligned(4);

struct amdgim_pf2vf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* structure checksum (see checksum_key in struct amdgpu_virt_fw_reserve) */
	uint32_t checksum;
	/* feature flags supported by the GIM driver (AMDGIM_FEATURE_*) */
	uint32_t feature_flags;
	/* max UVD encode resolution: max_width * max_height */
	uint32_t uvd_enc_max_pixels_count;
	/* max UVD encode bandwidth, in 16x16 pixel blocks per second */
	uint32_t uvd_enc_max_bandwidth;
	/* max VCE encode resolution: max_width * max_height */
	uint32_t vce_enc_max_pixels_count;
	/* max VCE encode bandwidth, in 16x16 pixel blocks per second */
	uint32_t vce_enc_max_bandwidth;
	/* MEC firmware offset, in KB, from the start of the VF-visible frame buffer */
	uint64_t mecfw_kboffset;
	/* MEC firmware size in KB */
	uint32_t mecfw_ksize;
	/* UVD firmware offset, in KB, from the start of the VF-visible frame buffer */
	uint64_t uvdfw_kboffset;
	/* UVD firmware size in KB */
	uint32_t uvdfw_ksize;
	/* VCE firmware offset, in KB, from the start of the VF-visible frame buffer */
	uint64_t vcefw_kboffset;
	/* VCE firmware size in KB */
	uint32_t vcefw_ksize;
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);

struct amd_sriov_msg_vf2pf_info_header {
	/* the total structure size in bytes */
	uint32_t size;
	/* version of this structure, written by the guest (VF) */
	uint32_t version;
	/* reserved */
	uint32_t reserved[2];
} __aligned(4);

struct amdgim_vf2pf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	/* guest driver version string */
	char driver_version[64];
	/* driver certification status */
	unsigned int driver_cert;
	/* guest OS type and version */
	unsigned int os_info;
	/* frame buffer usage, in MB */
	unsigned int fb_usage;
	/* gfx engine usage percentage */
	unsigned int gfx_usage;
	/* gfx engine health percentage */
	unsigned int gfx_health;
	/* compute engine usage percentage */
	unsigned int compute_usage;
	/* compute engine health percentage */
	unsigned int compute_health;
	/* VCE encode usage percentage */
	unsigned int vce_enc_usage;
	/* VCE encode health percentage */
	unsigned int vce_enc_health;
	/* UVD encode usage percentage */
	unsigned int uvd_enc_usage;
	/* UVD encode health percentage */
	unsigned int uvd_enc_health;
	unsigned int checksum;
} __aligned(4);

struct amdgim_vf2pf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	uint32_t checksum;
	/* guest driver version string */
	uint8_t driver_version[64];
	/* driver certification status */
	uint32_t driver_cert;
	/* guest OS type and version */
	uint32_t os_info;
	/* frame buffer usage, in MB */
	uint32_t fb_usage;
	/* gfx engine usage percentage */
	uint32_t gfx_usage;
	/* gfx engine health percentage */
	uint32_t gfx_health;
	/* compute engine usage percentage */
	uint32_t compute_usage;
	/* compute engine health percentage */
	uint32_t compute_health;
	/* VCE encode usage percentage */
	uint32_t vce_enc_usage;
	/* VCE encode health percentage */
	uint32_t vce_enc_health;
	/* UVD encode usage percentage */
	uint32_t uvd_enc_usage;
	/* UVD encode health percentage */
	uint32_t uvd_enc_health;
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);

#define AMDGPU_FW_VRAM_VF2PF_VER 2
typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;

#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
	do { \
		((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
	} while (0)

#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
	do { \
		(*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
	} while (0)
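
/*
 * Usage sketch (illustrative only): publishing a VF2PF field to the host,
 * assuming fw_reserve has already been set up by
 * amdgpu_virt_init_data_exchange():
 *
 *	AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert, 0);
 *
 * Note that these two macros dereference p_vf2pf unconditionally, so they
 * must only be used once the data exchange region is mapped.
 */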

#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
	do { \
		if (!adev->virt.fw_reserve.p_pf2vf) \
			*(val) = 0; \
		else { \
			if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
				*(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
			if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
				*(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
		} \
	} while (0)
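
/*
 * Usage sketch (illustrative only): reading a PF2VF field. Unlike the VF2PF
 * macros above, this helper checks p_pf2vf and the structure version, so the
 * result is simply zero when no data exchange region is present:
 *
 *	AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
 */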

/* GPU virtualization */
struct amdgpu_virt {
	uint32_t caps;
	struct amdgpu_bo *csa_obj;
	void *csa_cpu_addr;
	bool chained_ib_support;
	uint32_t reg_val_offs;
	struct amdgpu_irq_src ack_irq;
	struct amdgpu_irq_src rcv_irq;
	struct work_struct flr_work;
	struct amdgpu_mm_table mm_table;
	const struct amdgpu_virt_ops *ops;
	struct amdgpu_vf_error_buffer vf_errors;
	struct amdgpu_virt_fw_reserve fw_reserve;
	uint32_t gim_feature;
	/* protect DPM events sent to the GIM */
	struct mutex dpm_mutex;
	uint32_t reg_access_mode;
};

#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
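
/*
 * Usage sketch (illustrative only, a simplified version of the register
 * access policy used by the MMIO helpers): while the VF is out of full
 * access mode, register reads go through the KIQ instead of direct MMIO.
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		value = amdgpu_virt_kiq_rreg(adev, reg);
 *	else
 *		value = RREG32_NO_KIQ(reg);
 */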

static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}
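
/*
 * Usage sketch (illustrative only): early device init can use this helper to
 * flag a passed-through (non-SR-IOV) virtual machine, roughly:
 *
 *	if (is_virtual_machine())
 *		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 */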

#define amdgim_is_hwperf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t rreg1,
					uint32_t ref, uint32_t mask);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
					unsigned int key,
					unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
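
/*
 * Usage sketch (illustrative only, loosely based on the driver init path):
 * a VF brackets phases that need exclusive hardware access with the
 * request/release pair declared above, for example
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_virt_request_full_gpu(adev, true);
 *		if (r)
 *			return r;
 *	}
 *
 *	... hardware init ...
 *
 *	if (amdgpu_sriov_vf(adev))
 *		amdgpu_virt_release_full_gpu(adev, true);
 */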
#endif