#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

/* True exactly when value, viewed as a signed int, lies in
 * [-4096, 4095], i.e. fits in a signed 13-bit immediate field;
 * the unsigned addition wraps for anything outside that range.
 */
static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}

static void bpf_flush_icache(void *start_, void *end_)
{
#ifdef CONFIG_SPARC64
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
#endif
}

#define SEEN_DATAREF	1 /* might call external helpers */
#define SEEN_XREG	2 /* X register is used */
#define SEEN_MEM	4 /* use mem[] for temporary storage */

#define S13(X)		((X) & 0x1fff)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X))
#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		((X) << 25)
#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))

#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDGEU		CONDCC
#define CONDLU		CONDCS

#define WDISP22(X)	(((X) >> 2) & 0x3fffff)

#define BA		(F2(0, 2) | CONDA)
#define BGU		(F2(0, 2) | CONDGU)
#define BLEU		(F2(0, 2) | CONDLEU)
#define BGEU		(F2(0, 2) | CONDGEU)
#define BLU		(F2(0, 2) | CONDLU)
#define BE		(F2(0, 2) | CONDE)
#define BNE		(F2(0, 2) | CONDNE)

#ifdef CONFIG_SPARC64
#define BE_PTR		(F2(0, 1) | CONDE | (2 << 20))
#else
#define BE_PTR		BE
#endif

#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))

#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)	/* umul */
#define DIV		F3(2, 0x0e)	/* udiv */
#define SLL		F3(2, 0x25)
#define SRL		F3(2, 0x26)
#define JMPL		F3(2, 0x38)
#define CALL		F1(1)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)

#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define ST32		F3(3, 0x04)

#ifdef CONFIG_SPARC64
#define LDPTR		LD64
#define BASE_STACKFRAME	176
#else
#define LDPTR		LD32
#define BASE_STACKFRAME	96
#endif

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)

#define emit_nop()		\
do {				\
	*prog++ = SETHI(0, G0);	\
} while (0)

#define emit_neg()					\
do {	/* sub %g0, r_A, r_A */				\
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
} while (0)

#define emit_reg_move(FROM, TO)				\
do {	/* or %g0, FROM, TO */				\
	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
} while (0)

#define emit_clear(REG)					\
do {	/* or %g0, %g0, REG */				\
	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
} while (0)

#define emit_set_const(K, REG)		\
do {	/* sethi %hi(K), REG */		\
	*prog++ = SETHI(K, REG);	\
	/* or REG, %lo(K), REG */	\
	*prog++ = OR_LO(K, REG);	\
} while (0)
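
/* Illustrative expansion (example value, not taken from the source):
 * for K = 0x12345678, emit_set_const() produces
 *
 *	sethi	%hi(0x12345678), REG	! REG = 0x12345400
 *	or	REG, %lo(0x12345678), REG	! REG |= 0x278
 *
 * SETHI deposits bits [31:10] of K and clears bits [9:0]; the OR
 * immediate then fills in the low 10 bits.
 */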

	/* Emit
	 *
	 *	OP	r_A, r_X, r_A
	 */
#define emit_alu_X(OPCODE)					\
do {								\
	seen |= SEEN_XREG;					\
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
} while (0)

	/* Emit either:
	 *
	 *	OP	r_A, K, r_A
	 *
	 * or
	 *
	 *	sethi	%hi(K), r_TMP
	 *	or	r_TMP, %lo(K), r_TMP
	 *	OP	r_A, r_TMP, r_A
	 *
	 * depending upon whether K fits in a signed 13-bit
	 * immediate instruction field.  Emit nothing if K is
	 * zero, except for AND and MUL, whose results for
	 * K == 0 are not the identity (A &= 0 and A *= 0 must
	 * still clear A).
	 */
#define emit_alu_K(OPCODE, K)					\
do {								\
	if (K || OPCODE == AND || OPCODE == MUL) {		\
		unsigned int _insn = OPCODE;			\
		_insn |= RS1(r_A) | RD(r_A);			\
		if (is_simm13(K)) {				\
			*prog++ = _insn | IMMED | S13(K);	\
		} else {					\
			emit_set_const(K, r_TMP);		\
			*prog++ = _insn | RS2(r_TMP);		\
		}						\
	}							\
} while (0)

#define emit_loadimm(K, DEST)						\
do {									\
	if (is_simm13(K)) {						\
		/* or %g0, K, DEST */					\
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST);	\
	} else {							\
		emit_set_const(K, DEST);				\
	}								\
} while (0)

#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));	\
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load32(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));	\
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

#define emit_load16(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));	\
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

#define __emit_load8(BASE, STRUCT, FIELD, DEST)			\
do {	unsigned int _off = offsetof(STRUCT, FIELD);		\
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);	\
} while (0)

#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));	\
	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
} while (0)

#ifdef CONFIG_SPARC64
#define BIAS (STACK_BIAS - 4)
#else
#define BIAS (-4)
#endif

#define emit_ldmem(OFF, DEST)						\
do {	*prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST);	\
} while (0)

#define emit_stmem(OFF, SRC)						\
do {	*prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC);	\
} while (0)
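
/* The BPF scratch words MEM[0..BPF_MEMWORDS-1] are kept on the stack:
 * mem[K] is addressed as [%sp + BIAS - K*4].  On sparc64 BIAS folds in
 * STACK_BIAS, the constant offset the 64-bit ABI applies to %sp, so the
 * same S13() immediate form works for both configurations.
 */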

#ifdef CONFIG_SMP
#ifdef CONFIG_SPARC64
#define emit_load_cpu(REG)				\
	emit_load16(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG)				\
	emit_load32(G6, struct thread_info, cpu, REG)
#endif
#else
#define emit_load_cpu(REG)	emit_clear(REG)
#endif

#define emit_skb_loadptr(FIELD, DEST)			\
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST)			\
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST)			\
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST)			\
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST)			\
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

#define emit_call(FUNC)					\
do {	void *_here = image + addrs[i] - 8;		\
	unsigned int _off = (void *)(FUNC) - _here;	\
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
	emit_nop();					\
} while (0)

#define emit_branch(BR_OPC, DEST)			\
do {	unsigned int _here = addrs[i] - 8;		\
	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
} while (0)

#define emit_branch_off(BR_OPC, OFF)		\
do {	*prog++ = BR_OPC | WDISP22(OFF);	\
} while (0)

#define emit_jump(DEST)		emit_branch(BA, DEST)

#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

#define emit_cmp(R1, R2) \
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM) \
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_btst(R1, R2) \
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM) \
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_sub(R1, R2, R3) \
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3) \
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3) \
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3) \
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_and(R1, R2, R3) \
	*prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3))

#define emit_andi(R1, IMM, R3) \
	*prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_alloc_stack(SZ) \
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ) \
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))

/* A note about branch offset calculations.  The addrs[] array,
 * indexed by BPF instruction, records the address after all the
 * sparc instructions emitted for that BPF instruction.
 *
 * The most common case is to emit a branch at the end of such
 * a code sequence.  So this would be two instructions, the
 * branch and its delay slot.
 *
 * Therefore by default the branch emitters calculate the branch
 * offset field as:
 *
 *	destination - (addrs[i] - 8)
 *
 * This "addrs[i] - 8" is the address of the branch itself or
 * what "." would be in assembler notation.  The "8" part is
 * how we take into consideration the branch and its delay
 * slot mentioned above.
 *
 * Sometimes we need to emit a branch earlier in the code
 * sequence.  And in these situations we adjust "destination"
 * to accommodate this difference.  For example, if we needed
 * to emit a branch (and its delay slot) right before the
 * final instruction emitted for a BPF opcode, we'd use
 * "destination + 4" instead of just plain "destination" above.
 *
 * This is why you see all of these funny emit_branch() and
 * emit_jump() calls with adjusted offsets.
 */
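
/* A worked example (assumed three-instruction tail): if BPF insn i
 * ends with
 *
 *	cmp	%r_A, %r_X
 *	be	<target>
 *	 nop			! delay slot
 *
 * then addrs[i] points just past the nop, the branch word sits at
 * addrs[i] - 8, and emit_branch() encodes the displacement as
 * WDISP22(target - (addrs[i] - 8)).
 */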

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	void *image;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
	image = NULL;
	for (pass = 0; pass < 10; pass++) {
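		/* Pass 0 pretends every SEEN_* flag is set, so the
		 * prologue, and with it every addrs[] entry, starts
		 * out as an upper bound; later passes can only keep
		 * or shrink each instruction's length, which lets
		 * proglen converge across passes.
		 */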
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		/* Prologue */
		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;
				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 *	%o4 = skb->len - skb->data_len
			 *	%o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		/* Make sure we don't leak kernel information to the user. */
		if (bpf_needs_clear_a(&filter[0]))
			emit_clear(r_A); /* A = 0 */

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			u16 code = bpf_anc_helper(&filter[i]);
			int ilen;

			switch (code) {
			case BPF_ALU | BPF_ADD | BPF_X:	/* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_ALU | BPF_ADD | BPF_K:	/* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_ALU | BPF_SUB | BPF_X:	/* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_ALU | BPF_SUB | BPF_K:	/* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_ALU | BPF_AND | BPF_X:	/* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_ALU | BPF_AND | BPF_K:	/* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_ALU | BPF_OR | BPF_X:	/* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_ALU | BPF_OR | BPF_K:	/* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
			case BPF_ALU | BPF_XOR | BPF_X:
				emit_alu_X(XOR);
				break;
			case BPF_ALU | BPF_XOR | BPF_K:	/* A ^= K */
				emit_alu_K(XOR, K);
				break;
			case BPF_ALU | BPF_LSH | BPF_X:	/* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_ALU | BPF_LSH | BPF_K:	/* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_ALU | BPF_RSH | BPF_X:	/* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_ALU | BPF_RSH | BPF_K:	/* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_ALU | BPF_MUL | BPF_X:	/* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_ALU | BPF_MUL | BPF_K:	/* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_K:	/* A /= K with K != 0 */
				if (K == 1)
					break;
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_K(DIV, K);
				break;
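			/* BPF requires that a division by zero makes the
			 * filter return 0.  For A /= X the divisor is
			 * tested at run time: on zero we branch to an
			 * already-emitted "ret 0" (pc_ret0) or synthesize
			 * a jump to the epilogue with %r_A cleared in the
			 * delay slot.  The +8 (+20 on sparc32) added to
			 * the destinations compensates for these branches
			 * being emitted that many bytes before the usual
			 * addrs[i] - 8 slot (see the offset note above).
			 */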
			case BPF_ALU | BPF_DIV | BPF_X:	/* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
#ifdef CONFIG_SPARC32
					emit_branch(BE, t_offset + 20);
#else
					emit_branch(BE, t_offset + 8);
#endif
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop();
#ifdef CONFIG_SPARC32
					emit_jump(cleanup_addr + 20);
#else
					emit_jump(cleanup_addr + 8);
#endif
					emit_clear(r_A);
				}
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_X(DIV);
				break;
			case BPF_ALU | BPF_NEG:
				emit_neg();
				break;
			case BPF_RET | BPF_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				/* Fallthrough */
			case BPF_RET | BPF_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;
						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_MISC | BPF_TAX:
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_MISC | BPF_TXA:
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_ANC | SKF_AD_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_ANC | SKF_AD_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
			case BPF_ANC | SKF_AD_PKTTYPE:
				__emit_skb_load8(__pkt_type_offset, r_A);
				emit_andi(r_A, PKT_TYPE_MAX, r_A);
				emit_alu_K(SRL, 5);
				break;
			case BPF_ANC | SKF_AD_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_ANC | SKF_AD_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_ANC | SKF_AD_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_ANC | SKF_AD_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_ANC | SKF_AD_RXHASH:
				emit_skb_load32(hash, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG:
			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
				emit_skb_load16(vlan_tci, r_A);
				if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
					emit_alu_K(SRL, 12);
					emit_andi(r_A, 1, r_A);
				} else {
					emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
					emit_and(r_A, r_TMP, r_A);
				}
				break;
			case BPF_LD | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_A);
				break;
			case BPF_LDX | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_X);
				break;
			case BPF_LD | BPF_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_LDX | BPF_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_LD | BPF_MEM:
				seen |= SEEN_MEM;
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_LDX | BPF_MEM:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_ST:
				seen |= SEEN_MEM;
				emit_stmem(K * 4, r_A);
				break;
			case BPF_STX:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
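
			/* K is signed here: a negative K in the ancillary
			 * range (>= SKF_LL_OFF) selects the helper variant
			 * that handles the SKF_NET_OFF/SKF_LL_OFF
			 * pseudo-offsets, other negative offsets fall back
			 * to the generic helper, and K >= 0 takes the fast
			 * positive-offset path.
			 */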
			case BPF_LD | BPF_W | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:			seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_LD | BPF_B | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_LDX | BPF_B | BPF_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_LD | BPF_W | BPF_IND:
				func = bpf_jit_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_LD | BPF_B | BPF_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_JMP | BPF_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch

			COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
			COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);

cond_branch:		f_offset = addrs[i + filter[i].jf];
			t_offset = addrs[i + filter[i].jt];

			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				emit_jump(t_offset);
				emit_nop();
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				seen |= SEEN_XREG;
				emit_cmp(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				seen |= SEEN_XREG;
				emit_btst(r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (is_simm13(K)) {
					emit_cmpi(r_A, K);
				} else {
					emit_loadimm(K, r_TMP);
					emit_cmp(r_A, r_TMP);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (is_simm13(K)) {
					emit_btsti(r_A, K);
				} else {
					emit_loadimm(K, r_TMP);
					emit_btst(r_A, r_TMP);
				}
				break;
			}
			if (filter[i].jt != 0) {
				if (filter[i].jf)
					t_offset += 8;
				emit_branch(t_op, t_offset);
				emit_nop(); /* delay slot */
				if (filter[i].jf) {
					emit_jump(f_offset);
					emit_nop();
				}
				break;
			}
			emit_branch(f_op, f_offset);
			emit_nop(); /* delay slot */
			break;

			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_memfree(image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* last bpf instruction is always a RET:
		 * use it to give the cleanup instruction(s) addr
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A, %o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */
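
		/* Passes repeat until the generated length stops
		 * changing; only then is the image allocated, and one
		 * more pass actually copies instructions into it.  Any
		 * growth after allocation is a fatal bug (caught in
		 * the per-instruction check above).
		 */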
		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(proglen);
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass, image);

	if (image) {
		bpf_flush_icache(image, image + proglen);
		fp->bpf_func = (void *)image;
		fp->jited = true;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}