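/*
 * GVT-g workload scheduler interface: per-engine scheduler state,
 * per-workload bookkeeping and the submission API shared by the
 * scheduling policy and the vGPU submission backends.
 */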
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

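/*
 * Per-device scheduler state. Each engine has its own scheduler thread and
 * wait queue; which vGPU runs next is decided by the pluggable scheduling
 * policy behind sched_ops/sched_data.
 */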
struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be NULL when the owner is the host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};

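/*
 * Shadow copies of the workaround context batch buffers referenced by the
 * guest ring context. The masks below pick the graphics memory address and
 * size fields out of the guest INDIRECT_CTX / BB_PER_CTX_PTR values: the
 * low bits of INDIRECT_CTX hold the size in cache lines, and the low bit
 * of the per-context pointer is its valid bit.
 */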
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned valid;
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};

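/*
 * One guest submission on one engine. A workload is created from the
 * context descriptor the guest writes to its virtual ELSP, queued on the
 * per-ring workload queue, and picked up by the scheduler thread, which
 * shadows the guest ring buffer and batch buffers, builds an i915_request
 * and dispatches it on the real hardware. prepare() and complete() let the
 * submission backend hook into those two stages.
 */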
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	int ring_id;
	struct i915_request *req;

	/* whether this workload has been dispatched to i915 */
	bool dispatched;
	/* whether the guest request has already been shadowed */
	bool shadow;
	int status;

	struct intel_vgpu_mm *shadow_mm;

	/* different submission models may need different handlers */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffers attached to this workload */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA registers saved per context for performance monitoring */
	u32 oactxctrl;
	u32 flex_mmio[7];
};

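/*
 * A shadow copy of one guest batch buffer. bb_start_cmd_va points at the
 * shadowed MI_BATCH_BUFFER_START command that references this buffer, so
 * its graphics address can be fixed up once the shadow object is pinned.
 */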
struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned int clflush;
	bool accessing;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, ring_id) \
	(&(vgpu->submission.workload_q_head[ring_id]))

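/*
 * Submission interface. A typical flow in the execlist emulation path is
 * roughly:
 *
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	intel_vgpu_queue_workload(workload);
 *
 * The per-ring scheduler thread then dispatches the workload and it is
 * destroyed with intel_vgpu_destroy_workload() once it completes.
 */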
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif