This source file includes the following definitions:
- intel_guc_send
- intel_guc_send_and_receive
- intel_guc_notify
- intel_guc_to_host_event_handler
- intel_guc_ggtt_offset
- intel_guc_is_supported
- intel_guc_is_enabled
- intel_guc_is_running
- intel_guc_sanitize
- intel_guc_is_submission_supported
- intel_guc_enable_msg
- intel_guc_disable_msg
#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
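/*
 * Top level structure of the GuC. It owns the firmware image and its load
 * state, the GuC log, the CTB-based command transport channel, and the
 * interrupt, doorbell and client bookkeeping used to talk to the firmware.
 */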
struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	unsigned int msg_enabled_mask;

	struct {
		bool enabled;
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	bool submission_supported;

	struct i915_vma *ads_vma;
	struct __guc_ads_blob *ads_blob;

	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;
	struct i915_vma *shared_data;
	void *shared_data_vaddr;

	struct intel_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	/* Cyclic counter mod pagesize, used to assign doorbell cachelines */
	u32 db_cacheline;

	/* Control params for firmware initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/* GuC's FW specific registers used for sending MMIO H2G requests */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* Pending MMIO message, stashed while the CT channel is disabled */
	u32 mmio_msg;

	/* Serializes intel_guc_send() actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
		    u32 *response_buf, u32 response_buf_size);

	/* GuC's FW specific event handler function */
	void (*handler)(struct intel_guc *guc);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};

static inline int
intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len, NULL, 0);
}
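
/*
 * Usage sketch (illustrative only, not part of this header): a host-to-GuC
 * request is a small array of u32 dwords with the action code in the first
 * dword, so e.g. HuC authentication could be requested roughly as:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
 *		rsa_offset
 *	};
 *
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */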

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return guc->send(guc, action, len, response_buf, response_buf_size);
}

/* Tell the GuC firmware that new messages are pending for it */
static inline void intel_guc_notify(struct intel_guc *guc)
{
	guc->notify(guc);
}

/* Dispatch a GuC-to-host interrupt to the currently installed handler */
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	guc->handler(guc);
}

/* The GuC cannot use GGTT addresses at or above GUC_GGTT_TOP */
#define GUC_GGTT_TOP	0xFEE00000
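
/*
 * intel_guc_ggtt_offset() - get and validate the GGTT offset of @vma
 *
 * The GuC can only use GGTT addresses that lie between the GGTT pin bias
 * (the low range reserved for the boot ROM, SRAM and WOPCM) and
 * GUC_GGTT_TOP, so any VMA handed to the firmware must have been pinned
 * inside that window; the BUG_ONs below enforce both bounds.
 */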
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
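
/*
 * Illustrative pairing (not part of this header): objects shared with the
 * GuC are typically allocated with intel_guc_allocate_vma() and their
 * firmware-visible address taken with intel_guc_ggtt_offset(), e.g.
 *
 *	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	offset = intel_guc_ggtt_offset(guc, vma);
 */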

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_enabled(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

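/*
 * Drop the cached firmware-load state (e.g. across reset or suspend) so
 * that the GuC is treated as not running until it has been reloaded.
 */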
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	guc->mmio_msg = 0;

	return 0;
}

static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
{
	return guc->submission_supported;
}

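/*
 * msg_enabled_mask selects which GuC-to-host notification bits the event
 * handler will act on; updates are serialized by irq_lock.
 */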
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}

int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

#endif