/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
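
/* For example, the prologue in do_jit() uses
 * EMIT3_off32(0x48, 0x81, 0xEC, stacksize) to emit 48 81 EC imm32,
 * i.e. 'sub rsp, imm32': REX.W prefix, the 0x81 opcode, a ModRM byte
 * selecting rsp, then a little-endian 32-bit immediate.
 */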

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}
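
/* e.g. is_simm32() holds for 0x7fffffff but not for 0x123456789:
 * truncating to 32 bits and sign-extending back must be lossless,
 * which is what the rel32 fields of jmp/call require
 */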

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do { if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
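
/* e.g. JE is 74 rel8 in its short form and 0F 84 rel32 in its near form:
 * 0x74 + 0x10 == 0x84, which is what emit_cond_jmp in do_jit() relies on
 */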

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};
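
/* For example, BPF_REG_1 maps to 7 and encodes rdi with no prefix, while
 * BPF_REG_7 yields 5 in the 3-bit register field but is flagged by
 * is_ereg(), so a REX bit extends the encoding from rbp to r13
 */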

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15,
 * which need an extra byte of encoding.
 * rax, rcx, ..., rbp have simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9));
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
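
/* Worked example: EMIT_mov(BPF_REG_0, BPF_REG_1) expands to
 * EMIT3(add_2mod(0x48, R0, R1), 0x89, add_2reg(0xC0, R0, R1)), which
 * emits 48 89 F8: REX.W, the 'mov r/m64, r64' opcode and ModRM
 * 0xC0 + rax + (rdi << 3), i.e. 'mov rax, rdi'
 */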

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64
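
/* Each insn is first JITed into temp[] and only then copied into the image,
 * so the BPF_INSN_SAFETY slack lets do_jit() detect an oversized emit with
 * the ilen check at the bottom of the loop before anything outside temp[]
 * is corrupted.
 */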

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i;
	int proglen = 0;
	u8 *prog = temp;
	int stacksize = MAX_BPF_STACK +
		32 /* space for rbx, r13, r14, r15 */ +
		8 /* space for skb_copy_bits() buffer */;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, stacksize */
	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as a temporary, so all tcpdump filters need to spill/fill R7(r13)
	 * and R8(r14). The R9(r15) spill could be made conditional, but there
	 * is only one 'bpf_error' return path out of the helper functions
	 * inside bpf_jit.S. The overhead of the extra spill is negligible for
	 * any filter other than synthetic ones, so it is not worth the added
	 * complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
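
	/* A sketch of the frame below rbp at this point, derived from the
	 * stacksize computation above: the four callee-saved spills occupy
	 * the lowest 32 bytes of the frame, the 8-byte skb_copy_bits()
	 * buffer sits just above them, and the MAX_BPF_STACK bytes directly
	 * under rbp form the BPF stack addressed through BPF_REG_FP.
	 */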

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */

	if (seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			/* sub %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
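
			/* e.g. BPF_ALU64 | BPF_ADD | BPF_X with dst_reg == R0
			 * and src_reg == R1 emits 48 01 F8, i.e. 'add rax, rdi'
			 */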

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}
			/* fall through: imm32 >= 0 */

		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

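			/* mul, like div above, implicitly uses rdx:rax on x86,
			 * so rax and rdx are saved and restored around it too
			 */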
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
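
			/* On little-endian x86 the BPF_FROM_LE cases above are
			 * no-ops apart from zero-extending 16/32-bit values,
			 * which is why only movzwl / 'mov eax, eax' are emitted
			 */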

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;
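
			/* e.g. BPF_LDX | BPF_MEM | BPF_W with dst_reg == R0,
			 * src_reg == R1 and off == 0x14 emits 8B 47 14,
			 * i.e. 'mov eax, dword ptr [rdi + 0x14]'
			 */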

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;
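
			/* The E8 rel32 call above is relative to the end of
			 * the call insn; addrs[i] is the offset of the end of
			 * this BPF insn in the image, which is why jmp_offset
			 * is computed against image + addrs[i]
			 */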

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;
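
			/* ctx->cleanup_addr records where this epilogue
			 * starts; later BPF_EXIT insns and the divide-by-zero
			 * path in the div/mod case jump here instead of
			 * duplicating the epilogue
			 */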

		default:
			/* By design the x64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not
			 * to the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	/* JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs
	 * may converge on the last pass. In such a case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}
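
	/* image is non-NULL here only if a write pass completed with a
	 * stable proglen; the 'pass < 10 || image' condition above keeps
	 * iterating until that final write pass has run
	 */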

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = true;
	}
out:
	kfree(addrs);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}