This source file includes following definitions.
- get_sh_mem_bases_32
- get_sh_mem_bases_nybble_64
- dqm_lock
- dqm_unlock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #ifndef KFD_DEVICE_QUEUE_MANAGER_H_
25 #define KFD_DEVICE_QUEUE_MANAGER_H_
26
27 #include <linux/rwsem.h>
28 #include <linux/list.h>
29 #include <linux/mutex.h>
30 #include <linux/sched/mm.h>
31 #include "kfd_priv.h"
32 #include "kfd_mqd_manager.h"
33
34
/*
 * Node linking one process' per-device queue/context data
 * (qcm_process_device) into a device queue manager's process list.
 */
struct device_process_node {
	struct qcm_process_device *qpd;	/* process' queue/context data on this device */
	struct list_head list;		/* entry in the dqm's process list */
};
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
/*
 * Callback table implemented per scheduling mode (HW scheduling vs.
 * no-HW-scheduling). All callbacks operate on a device queue manager
 * and, where relevant, a process' per-device data (qpd).
 *
 * NOTE(review): per-callback semantics below are inferred from names and
 * signatures only — the implementations live in the corresponding .c file.
 */
struct device_queue_manager_ops {
	/* Create a user-mode queue q for the process described by qpd. */
	int (*create_queue)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);

	/* Destroy a previously created user-mode queue. */
	int (*destroy_queue)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q);

	/* Apply updated queue properties (e.g. after a user update call). */
	int (*update_queue)(struct device_queue_manager *dqm,
				struct queue *q);

	/* Register a process' per-device data with this queue manager. */
	int (*register_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	/* Undo register_process. */
	int (*unregister_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	/* Lifecycle: initialize/start/stop/uninitialize the queue manager. */
	int (*initialize)(struct device_queue_manager *dqm);
	int (*start)(struct device_queue_manager *dqm);
	int (*stop)(struct device_queue_manager *dqm);
	void (*uninitialize)(struct device_queue_manager *dqm);

	/* Create a kernel-mode queue (e.g. for driver-internal use). */
	int (*create_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	/* Destroy a kernel-mode queue. */
	void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	/*
	 * Configure default/alternate cache policy and the alternate
	 * aperture window for a process. Returns success as bool.
	 */
	bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);

	/* Install trap handler base (tba) / trap memory (tma) addresses. */
	int	(*set_trap_handler)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd,
				    uint64_t tba_addr,
				    uint64_t tma_addr);

	/* Tear down all of a process' queues when the process exits. */
	int (*process_termination)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	/* Suspend (evict) and resume (restore) all queues of a process. */
	int (*evict_process_queues)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd);
	int (*restore_process_queues)(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd);

	/*
	 * Copy a queue's wave control stack to user space and report the
	 * used sizes of the control stack and save area.
	 */
	int	(*get_wave_state)(struct device_queue_manager *dqm,
				  struct queue *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);
};
140
/*
 * Per-ASIC hooks filled in by the device_queue_manager_init_*()
 * functions declared below; they adapt the generic queue manager to a
 * specific GPU generation (CIK/VI/V9/V10, ...).
 */
struct device_queue_manager_asic_ops {
	/* Refresh ASIC-specific fields of a process' per-device data. */
	int	(*update_qpd)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
	/* ASIC-specific implementation of the cache-policy setup. */
	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size);
	/* Set up the VM/address-space state of an SDMA queue. */
	void	(*init_sdma_vm)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);
	/* Construct the MQD manager for the given MQD type on this device. */
	struct mqd_manager *	(*mqd_manager_init)(enum KFD_MQD_TYPE type,
						 struct kfd_dev *dev);
};
156
157
158
159
160
161
162
163
164
165
166
167
168
/*
 * Central per-device queue bookkeeping object: owns the scheduling
 * callbacks, the MQD managers, the process/queue lists and the
 * allocation bitmaps for compute and SDMA queues.
 */
struct device_queue_manager {
	struct device_queue_manager_ops ops;		/* scheduling-mode callbacks */
	struct device_queue_manager_asic_ops asic_ops;	/* per-ASIC hooks */

	struct mqd_manager	*mqd_mgrs[KFD_MQD_TYPE_MAX]; /* one per MQD type */
	struct packet_manager	packets;
	struct kfd_dev		*dev;		/* owning KFD device */
	/* Never take directly — use dqm_lock()/dqm_unlock() below. */
	struct mutex		lock_hidden;
	struct list_head	queues;
	/* memalloc flags saved by dqm_lock(), restored by dqm_unlock(). */
	unsigned int		saved_flags;
	unsigned int		processes_count;
	unsigned int		queue_count;
	unsigned int		sdma_queue_count;
	unsigned int		xgmi_sdma_queue_count;
	unsigned int		total_queue_count;
	unsigned int		next_pipe_to_allocate;
	unsigned int		*allocated_queues;
	/* SDMA queue allocation bitmaps; presumably set bit = free — verify in dqm code. */
	uint64_t		sdma_bitmap;
	uint64_t		xgmi_sdma_bitmap;
	unsigned int		vmid_bitmap;
	uint64_t		pipelines_addr;	/* GPU address of pipeline state */
	struct kfd_mem_obj	*pipeline_mem;
	uint64_t		fence_gpu_addr;	/* GPU-visible address of fence_addr */
	unsigned int		*fence_addr;	/* CPU mapping of the fence */
	struct kfd_mem_obj	*fence_mem;
	bool			active_runlist;
	int			sched_policy;	/* HWS vs. no-HWS scheduling policy */

	/* hw exception data */
	bool			is_hws_hang;	/* HW scheduler hang detected */
	struct work_struct	hw_exception_work;
	struct kfd_mem_obj	hiq_sdma_mqd;
};
202
/* Per-ASIC initializers: fill in the asic_ops table for each GPU family. */
void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_cik_hawaii(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10_navi10(
		struct device_queue_manager_asic_ops *asic_ops);
/* Program a process' SH_MEM_* aperture registers on the device. */
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
/* Topology helpers: counts of queues/pipes/MECs and SDMA queues. */
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
222
223 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
224 {
225 return (pdd->lds_base >> 16) & 0xFF;
226 }
227
228 static inline unsigned int
229 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
230 {
231 return (pdd->lds_base >> 60) & 0x0E;
232 }
233
234
235
236
237
/*
 * Acquire the queue manager lock and enter a NOFS allocation scope:
 * memalloc_nofs_save() makes every allocation in this task implicitly
 * GFP_NOFS until dqm_unlock() restores the saved flags.
 * NOTE(review): presumably this prevents filesystem reclaim from
 * re-entering KFD while the lock is held — confirm against the commit
 * that introduced saved_flags.
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	dqm->saved_flags = memalloc_nofs_save();
}
/*
 * Leave the NOFS allocation scope entered by dqm_lock() (restoring the
 * flags saved there) and release the queue manager lock — the exact
 * mirror order of dqm_lock().
 */
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_nofs_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}
248
249 #endif