/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"

/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for both
 * normal and drm master processes).
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses in
 * scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-ring command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, master-only, register whitelist. The parser
 * implements a number of checks, including the privileged memory checks, via a
 * general bitmasking mechanism.
 */

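/*
 * Note: these masks isolate the opcode bits of a command header for each
 * command range: MI commands keep bits 31:23, 3D/media and MFX commands
 * keep bits 31:16, and 2D commands keep bits 31:22. See the comment above
 * CMD_HASH_MASK for how these interact with the command hash table.
 */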
#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

#define CMD(op, opm, f, lm, fl, ...)				\
	{							\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
		.cmd = { (op), (opm) },				\
		.length = { (lm) },				\
		__VA_ARGS__					\
	}

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER

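/*
 * For illustration only (not compiled): with the macros above, a table
 * entry such as
 *
 *     CMD(  MI_NOOP,  SMI,  F,  1,  S  )
 *
 * expands roughly to
 *
 *     {
 *         .flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *         .cmd = { MI_NOOP, STD_MI_OPCODE_MASK },
 *         .length = { 1 },
 *     }
 *
 * i.e. a fixed-length, one-dword command that the parser simply skips.
 */
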
/*            Command                          Mask   Fixed Len   Action
	      ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MI_BATCH_BUFFER_START requires some special handling. It's not
	 * really a 'skip' action but it doesn't seem like it's worth adding
	 * a new action. See i915_parse_cmds().
	 */
	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};

static const struct drm_i915_cmd_descriptor render_cmds[] = {
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 1,
			.mask = MI_REPORT_PERF_COUNT_GGTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
	CMD(  MEDIA_VFE_STATE,			S3D,   !F,  0xFFFF, B,
	      .bits = {{
			.offset = 2,
			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
			.expected = 0,
	      }},						       ),
	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 1,
			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_STORE_DATA_INDEX),
			.expected = 0,
			.condition_offset = 1,
			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};

static const struct drm_i915_cmd_descriptor video_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MFX_WAIT doesn't fit the way we handle length for most commands.
	 * It has a length field but it uses a non-standard length bias.
	 * It is always 1 dword though, so just treat it as fixed length.
	 */
	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};

static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor blt_cmds[] = {
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};

static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M

static const struct drm_i915_cmd_table gen7_render_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

/*
 * Register whitelists, sorted by increasing register offset.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */

/* Convenience macro for adding 64-bit registers */
#define REG64(addr) (addr), (addr + sizeof(u32))
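/*
 * For example, REG64(PS_INVOCATION_COUNT) emits two consecutive u32
 * entries, PS_INVOCATION_COUNT and PS_INVOCATION_COUNT + 4, so that a
 * whitelisted 64-bit counter can be accessed as two separate 32-bit
 * register reads.
 */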

static const u32 gen7_render_regs[] = {
	REG64(GPGPU_THREADS_DISPATCHED),
	REG64(HS_INVOCATION_COUNT),
	REG64(DS_INVOCATION_COUNT),
	REG64(IA_VERTICES_COUNT),
	REG64(IA_PRIMITIVES_COUNT),
	REG64(VS_INVOCATION_COUNT),
	REG64(GS_INVOCATION_COUNT),
	REG64(GS_PRIMITIVES_COUNT),
	REG64(CL_INVOCATION_COUNT),
	REG64(CL_PRIMITIVES_COUNT),
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	OACONTROL, /* Only allowed for LRI and SRM. See below. */
	REG64(MI_PREDICATE_SRC0),
	REG64(MI_PREDICATE_SRC1),
	GEN7_3DPRIM_END_OFFSET,
	GEN7_3DPRIM_START_VERTEX,
	GEN7_3DPRIM_VERTEX_COUNT,
	GEN7_3DPRIM_INSTANCE_COUNT,
	GEN7_3DPRIM_START_INSTANCE,
	GEN7_3DPRIM_BASE_VERTEX,
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
	GEN7_SO_WRITE_OFFSET(0),
	GEN7_SO_WRITE_OFFSET(1),
	GEN7_SO_WRITE_OFFSET(2),
	GEN7_SO_WRITE_OFFSET(3),
	GEN7_L3SQCREG1,
	GEN7_L3CNTLREG2,
	GEN7_L3CNTLREG3,
};

static const u32 gen7_blt_regs[] = {
	BCS_SWCTRL,
};

static const u32 ivb_master_regs[] = {
	FORCEWAKE_MT,
	DERRMR,
	GEN7_PIPE_DE_LOAD_SL(PIPE_A),
	GEN7_PIPE_DE_LOAD_SL(PIPE_B),
	GEN7_PIPE_DE_LOAD_SL(PIPE_C),
};

static const u32 hsw_master_regs[] = {
	FORCEWAKE_MT,
	DERRMR,
};

#undef REG64

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT) {
			if (op == 6)
				return 0xFFFF;
			else
				return 0xFFF;
		} else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static bool validate_cmds_sorted(struct intel_engine_cs *ring,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
	int i;
	bool ret = true;

	if (!cmd_tables || cmd_table_count == 0)
		return true;

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];
		u32 previous = 0;
		int j;

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			u32 curr = desc->cmd.value & desc->cmd.mask;

			if (curr < previous) {
				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					  ring->id, i, j, curr, previous);
				ret = false;
			}

			previous = curr;
		}
	}

	return ret;
}

static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
	int i;
	u32 previous = 0;
	bool ret = true;

	for (i = 0; i < reg_count; i++) {
		u32 curr = reg_table[i];

		if (curr < previous) {
			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
				  ring_id, i, curr, previous);
			ret = false;
		}

		previous = curr;
	}

	return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
		check_sorted(ring->id, ring->master_reg_table,
			     ring->master_reg_count);
}

struct cmd_node {
	const struct drm_i915_cmd_descriptor *desc;
	struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
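/*
 * In effect, a command dword is hashed on its MI opcode bits only. For
 * example, an MI_STORE_DWORD_IMM header with the MI_GLOBAL_GTT bit set
 * still lands in the MI_STORE_DWORD_IMM bucket, because CMD_HASH_MASK
 * keeps only bits 31:23 of the header.
 */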

static int init_hash_table(struct intel_engine_cs *ring,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
	int i, j;

	hash_init(ring->cmd_hash);

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			struct cmd_node *desc_node =
				kmalloc(sizeof(*desc_node), GFP_KERNEL);

			if (!desc_node)
				return -ENOMEM;

			desc_node->desc = desc;
			hash_add(ring->cmd_hash, &desc_node->node,
				 desc->cmd.value & CMD_HASH_MASK);
		}
	}

	return 0;
}

static void fini_hash_table(struct intel_engine_cs *ring)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
	int i;

	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
		hash_del(&desc_node->node);
		kfree(desc_node);
	}
}

/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
	int ret;

	if (!IS_GEN7(ring->dev))
		return 0;

	switch (ring->id) {
	case RCS:
		if (IS_HASWELL(ring->dev)) {
			cmd_tables = hsw_render_ring_cmds;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmds);
		} else {
			cmd_tables = gen7_render_cmds;
			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
		}

		ring->reg_table = gen7_render_regs;
		ring->reg_count = ARRAY_SIZE(gen7_render_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VCS:
		cmd_tables = gen7_video_cmds;
		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case BCS:
		if (IS_HASWELL(ring->dev)) {
			cmd_tables = hsw_blt_ring_cmds;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
		} else {
			cmd_tables = gen7_blt_cmds;
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
		}

		ring->reg_table = gen7_blt_regs;
		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		break;
	case VECS:
		cmd_tables = hsw_vebox_cmds;
		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
		/* VECS can use the same length_mask function as VCS */
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
			  ring->id);
		BUG();
	}

	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
	BUG_ON(!validate_regs_sorted(ring));

	WARN_ON(!hash_empty(ring->cmd_hash));

	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
	if (ret) {
		DRM_ERROR("CMD: cmd_parser_init failed!\n");
		fini_hash_table(ring);
		return ret;
	}

	ring->needs_cmd_parser = true;

	return 0;
}

/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return;

	fini_hash_table(ring);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;

	hash_for_each_possible(ring->cmd_hash, desc_node, node,
			       cmd_header & CMD_HASH_MASK) {
		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
		u32 masked_cmd = desc->cmd.mask & cmd_header;
		u32 masked_value = desc->cmd.value & desc->cmd.mask;

		if (masked_cmd == masked_value)
			return desc;
	}

	return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
	 u32 cmd_header,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	const struct drm_i915_cmd_descriptor *desc;
	u32 mask;

	desc = find_cmd_in_table(ring, cmd_header);
	if (desc)
		return desc;

	mask = ring->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

	BUG_ON(!default_desc);
	default_desc->flags = CMD_DESC_SKIP;
	default_desc->length.mask = mask;

	return default_desc;
}

static bool valid_reg(const u32 *table, int count, u32 addr)
{
	if (table && count != 0) {
		int i;

		for (i = 0; i < count; i++) {
			if (table[i] == addr)
				return true;
		}
	}

	return false;
}

static u32 *vmap_batch(struct drm_i915_gem_object *obj,
		       unsigned start, unsigned len)
{
	int i;
	void *addr = NULL;
	struct sg_page_iter sg_iter;
	int first_page = start >> PAGE_SHIFT;
	int last_page = (start + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int npages = last_page - first_page;
	struct page **pages;

	pages = drm_malloc_ab(npages, sizeof(*pages));
	if (pages == NULL) {
		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
		goto finish;
	}

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
		pages[i++] = sg_page_iter_page(&sg_iter);
		if (i == npages)
			break;
	}

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	if (addr == NULL) {
		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
		goto finish;
	}

finish:
	if (pages)
		drm_free_large(pages);
	return (u32 *)addr;
}

/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
		       struct drm_i915_gem_object *src_obj,
		       u32 batch_start_offset,
		       u32 batch_len)
{
	int needs_clflush = 0;
	void *src_base, *src;
	void *dst = NULL;
	int ret;

	if (batch_len > dest_obj->base.size ||
	    batch_len + batch_start_offset > src_obj->base.size)
		return ERR_PTR(-E2BIG);

	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
		return ERR_PTR(ret);
	}

	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
	if (!src_base) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
		ret = -ENOMEM;
		goto unpin_src;
	}

	ret = i915_gem_object_get_pages(dest_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
		goto unmap_src;
	}
	i915_gem_object_pin_pages(dest_obj);

	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
		i915_gem_object_unpin_pages(dest_obj);
		goto unmap_src;
	}

	dst = vmap_batch(dest_obj, 0, batch_len);
	if (!dst) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
		i915_gem_object_unpin_pages(dest_obj);
		ret = -ENOMEM;
		goto unmap_src;
	}

	src = src_base + offset_in_page(batch_start_offset);
	if (needs_clflush)
		drm_clflush_virt_range(src, batch_len);

	memcpy(dst, src, batch_len);

unmap_src:
	vunmap(src_base);
unpin_src:
	i915_gem_object_unpin_pages(src_obj);

	return ret ? ERR_PTR(ret) : dst;
}

/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return false;

	if (!USES_PPGTT(ring->dev))
		return false;

	return (i915.enable_cmd_parser == 1);
}

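/*
 * A worked example of the CMD_DESC_BITMASK handling below: the MI_FLUSH_DW
 * descriptors above carry one unconditional bit check (MI_FLUSH_DW_NOTIFY
 * in dword 0 must be clear) and two conditional ones (MI_FLUSH_DW_USE_GTT
 * and MI_FLUSH_DW_STORE_INDEX), which are only evaluated when the post-sync
 * operation field selected by MI_FLUSH_DW_OP_MASK is non-zero. Any checked
 * dword whose masked value differs from 'expected' causes the command to be
 * rejected.
 */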
static bool check_cmd(const struct intel_engine_cs *ring,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd,
		      const bool is_master,
		      bool *oacontrol_set)
{
	if (desc->flags & CMD_DESC_REJECT) {
		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
		return false;
	}

	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
				 *cmd);
		return false;
	}

	if (desc->flags & CMD_DESC_REGISTER) {
		u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;

		/*
		 * OACONTROL requires some special handling for writes. We
		 * want to make sure that any batch which enables OA also
		 * disables it before the end of the batch. The goal is to
		 * prevent one process from snooping on the perf data from
		 * another process. To do that, we need to check the value
		 * that will be written to the register. Hence, limit
		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
		 */
		if (reg_addr == OACONTROL) {
			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
				return false;
			}

			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
				*oacontrol_set = (cmd[2] != 0);
		}

		if (!valid_reg(ring->reg_table,
			       ring->reg_count, reg_addr)) {
			if (!is_master ||
			    !valid_reg(ring->master_reg_table,
				       ring->master_reg_count,
				       reg_addr)) {
				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
						 reg_addr,
						 *cmd,
						 ring->id);
				return false;
			}
		}
	}

	if (desc->flags & CMD_DESC_BITMASK) {
		int i;

		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
			u32 dword;

			if (desc->bits[i].mask == 0)
				break;

			if (desc->bits[i].condition_mask != 0) {
				u32 offset =
					desc->bits[i].condition_offset;
				u32 condition = cmd[offset] &
					desc->bits[i].condition_mask;

				if (condition == 0)
					continue;
			}

			dword = cmd[desc->bits[i].offset] &
				desc->bits[i].mask;

			if (dword != desc->bits[i].expected) {
				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
						 *cmd,
						 desc->bits[i].mask,
						 desc->bits[i].expected,
						 dword, ring->id);
				return false;
			}
		}
	}

	return true;
}

#define LENGTH_BIAS 2
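/*
 * Length handling sketch (assuming the standard length encoding): for a
 * variable-length command, i915_parse_cmds() computes the total size in
 * dwords as (header & length.mask) + LENGTH_BIAS, so a header whose dword
 * length field reads 2 is treated as spanning 4 dwords including the
 * header itself.
 */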

/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @shadow_batch_obj: copy of the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @batch_len: length of the commands in batch_obj
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
		    u32 batch_len,
		    bool is_master)
{
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
	int ret = 0;

	batch_base = copy_batch(shadow_batch_obj, batch_obj,
				batch_start_offset, batch_len);
	if (IS_ERR(batch_base)) {
		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
		return PTR_ERR(batch_base);
	}

	/*
	 * We use the batch length as size because the shadow object is as
	 * large or larger and copy_batch() will write MI_NOPs to the extra
	 * space. Parsing should be faster in some cases this way.
	 */
	batch_end = batch_base + (batch_len / sizeof(*batch_end));

	cmd = batch_base;
	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
		u32 length;

		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(ring, *cmd, &default_desc);
		if (!desc) {
			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		/*
		 * If the batch buffer contains a chained batch, return an
		 * error that tells the caller to abort and dispatch the
		 * workload as a non-secure batch.
		 */
		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
			ret = -EACCES;
			break;
		}

		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

		if ((batch_end - cmd) < length) {
			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
					 *cmd,
					 length,
					 batch_end - cmd);
			ret = -EINVAL;
			break;
		}

		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
			ret = -EINVAL;
			break;
		}

		cmd += length;
	}

	if (oacontrol_set) {
		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
		ret = -EINVAL;
	}

	if (cmd >= batch_end) {
		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
		ret = -EINVAL;
	}

	vunmap(batch_base);
	i915_gem_object_unpin_pages(shadow_batch_obj);

	return ret;
}

/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
{
	/*
	 * Command parser version history
	 *
	 * 1. Initial version. Checks batches and reports violations, but leaves
	 *    hardware parsing enabled (so does not allow new use cases).
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
	 */
	return 3;
}