1/* 2 * Packet matching code for ARP packets. 3 * 4 * Based heavily, if not almost entirely, upon ip_tables.c framework. 5 * 6 * Some ARP specific bits are: 7 * 8 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 9 * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net> 10 * 11 */ 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13#include <linux/kernel.h> 14#include <linux/skbuff.h> 15#include <linux/netdevice.h> 16#include <linux/capability.h> 17#include <linux/if_arp.h> 18#include <linux/kmod.h> 19#include <linux/vmalloc.h> 20#include <linux/proc_fs.h> 21#include <linux/module.h> 22#include <linux/init.h> 23#include <linux/mutex.h> 24#include <linux/err.h> 25#include <net/compat.h> 26#include <net/sock.h> 27#include <asm/uaccess.h> 28 29#include <linux/netfilter/x_tables.h> 30#include <linux/netfilter_arp/arp_tables.h> 31#include "../../netfilter/xt_repldata.h" 32 33MODULE_LICENSE("GPL"); 34MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 35MODULE_DESCRIPTION("arptables core"); 36 37/*#define DEBUG_ARP_TABLES*/ 38/*#define DEBUG_ARP_TABLES_USER*/ 39 40#ifdef DEBUG_ARP_TABLES 41#define dprintf(format, args...) printk(format , ## args) 42#else 43#define dprintf(format, args...) 44#endif 45 46#ifdef DEBUG_ARP_TABLES_USER 47#define duprintf(format, args...) printk(format , ## args) 48#else 49#define duprintf(format, args...) 
#endif

/* Assertions compiled in only when netfilter debugging is enabled. */
#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define ARP_NF_ASSERT(x)
#endif

/* Allocate and fill in the bootstrap replace blob for an arptables
 * table; used by the table modules at registration time.
 */
void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);

/* Masked compare of a hardware address from the packet against a
 * rule's device-address spec.  Returns 0 on match, non-zero otherwise.
 */
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	/* Never read past the rule's fixed-size addr/mask buffers. */
	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return ret != 0;
}

/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int)
 * Some arches dont care, unrolling the loop is a win on them.
 * For other arches, we only have a 16bit alignement.
 */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}

/* Returns whether packet matches rule or not.
 */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	/* The variable-length ARP payload starts right after the header. */
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

	/* Evaluate "bool", XORed with the rule's matching invert flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))

	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
		  ARPT_INV_ARPOP)) {
		dprintf("ARP operation field mismatch.\n");
		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
		  ARPT_INV_ARPHRD)) {
		dprintf("ARP hardware address format mismatch.\n");
		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
		  ARPT_INV_ARPPRO)) {
		dprintf("ARP protocol address format mismatch.\n");
		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
		  ARPT_INV_ARPHLN)) {
		dprintf("ARP hardware address length mismatch.\n");
		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
		return 0;
	}

	/* Walk the payload: sender hw, sender IP, target hw, target IP.
	 * The IP addresses may be unaligned, hence the memcpy.
	 */
	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
		  ARPT_INV_SRCDEVADDR) ||
	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
		  ARPT_INV_TGTDEVADDR)) {
		dprintf("Source or target device address mismatch.\n");

		return 0;
	}

	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
		  ARPT_INV_SRCIP) ||
	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
		  ARPT_INV_TGTIP)) {
		dprintf("Source or target IP address mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&src_ipaddr,
			&arpinfo->smsk.s_addr,
			&arpinfo->src.s_addr,
			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&tgt_ipaddr,
			&arpinfo->tmsk.s_addr,
			&arpinfo->tgt.s_addr,
			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches. */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, arpinfo->iniface,
			arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, arpinfo->outiface,
			arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	return 1;
#undef FWINV
}

/* Reject rules with flag/invflag bits the kernel does not know. */
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 arp->flags & ~ARPT_F_MASK);
		return 0;
	}
	if (arp->invflags & ~ARPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 arp->invflags & ~ARPT_INV_MASK);
		return 0;
	}

	return 1;
}

/* Target of the built-in ERROR rule: should never run on a packet. */
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_err_ratelimited("arp_tables: error: '%s'\n",
			    (const char *)par->targinfo);

	return NF_DROP;
}

/* const-qualified wrapper around arpt_get_target(). */
static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}

/* Entry at byte offset "offset" inside a rule blob. */
static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}

static inline __pure
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Main rule traversal: run the given table over one ARP packet and
 * return the netfilter verdict.
 */
unsigned int arpt_do_table(struct sk_buff *skb,
			   unsigned int hook,
			   const struct nf_hook_state *state,
			   struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, *back;
	const char *indev, *outdev;
	void *table_base;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ?
	 state->out->name : nulldevname;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);
	/* "back" is where a RETURN verdict pops back to. */
	back = get_entry(table_base, private->underflow[hook]);

	acpar.in = state->in;
	acpar.out = state->out;
	acpar.hooknum = hook;
	acpar.family = NFPROTO_ARP;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct xt_entry_target *t;

		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);

		t = arpt_get_target_c(e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict (ACCEPT/DROP...). */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				/* Save old back ptr in next entry */
				struct arpt_entry *next = arpt_next_entry(e);
				next->comefrom = (void *)back - table_base;

				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		 * abs. verdicts
		 */
		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		/* Target might have changed stuff. */
		arp = arp_hdr(skb);

		if (verdict == XT_CONTINUE)
			e = arpt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}

/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_arp *arp)
{
	static const struct arpt_arp uncond;

	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
}

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* High bit set while a chain is on the current
			 * walk path: seeing it again means a loop.
			 */
			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				pr_notice("arptables: loop hook %u pos %u %08X.\n",
					  hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END.
 */
			if ((e->target_offset == sizeof(struct arpt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->arp)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}

					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

/* Basic layout sanity for one entry: match flags are valid and the
 * target record fits between target_offset and next_offset.
 */
static inline int check_entry(const struct arpt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!arp_checkentry(&e->arp)) {
		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
		return -EINVAL;

	t = arpt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

/* Run the target's own checkentry hook through xt_check_target(). */
static inline int check_target(struct arpt_entry *e, const char *name)
{
	struct xt_entry_target *t = arpt_get_target(e);
	int ret;
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_ARP,
	};

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		duprintf("arp_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

/* Resolve the entry's target module and validate the entry; on success
 * a reference on the target module is held by the entry.
 */
static inline int
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	t = arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
return ret; 542} 543 544static bool check_underflow(const struct arpt_entry *e) 545{ 546 const struct xt_entry_target *t; 547 unsigned int verdict; 548 549 if (!unconditional(&e->arp)) 550 return false; 551 t = arpt_get_target_c(e); 552 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 553 return false; 554 verdict = ((struct xt_standard_target *)t)->verdict; 555 verdict = -verdict - 1; 556 return verdict == NF_DROP || verdict == NF_ACCEPT; 557} 558 559static inline int check_entry_size_and_hooks(struct arpt_entry *e, 560 struct xt_table_info *newinfo, 561 const unsigned char *base, 562 const unsigned char *limit, 563 const unsigned int *hook_entries, 564 const unsigned int *underflows, 565 unsigned int valid_hooks) 566{ 567 unsigned int h; 568 569 if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || 570 (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { 571 duprintf("Bad offset %p\n", e); 572 return -EINVAL; 573 } 574 575 if (e->next_offset 576 < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { 577 duprintf("checking: element %p size %u\n", 578 e, e->next_offset); 579 return -EINVAL; 580 } 581 582 /* Check hooks & underflows */ 583 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 584 if (!(valid_hooks & (1 << h))) 585 continue; 586 if ((unsigned char *)e - base == hook_entries[h]) 587 newinfo->hook_entry[h] = hook_entries[h]; 588 if ((unsigned char *)e - base == underflows[h]) { 589 if (!check_underflow(e)) { 590 pr_err("Underflows must be unconditional and " 591 "use the STANDARD target with " 592 "ACCEPT/DROP\n"); 593 return -EINVAL; 594 } 595 newinfo->underflow[h] = underflows[h]; 596 } 597 } 598 599 /* Clear counters and comefrom */ 600 e->counters = ((struct xt_counters) { 0, 0 }); 601 e->comefrom = 0; 602 return 0; 603} 604 605static inline void cleanup_entry(struct arpt_entry *e) 606{ 607 struct xt_tgdtor_param par; 608 struct xt_entry_target *t; 609 610 t = arpt_get_target(e); 611 par.target = t->u.kernel.target; 612 par.targinfo = 
	t->data;
	par.family = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
		++i;
		/* ERROR entries mark user-defined chains; count them to
		 * size the jump stack.
		 */
		if (strcmp(arpt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	/* On failure, undo only the i entries that were checked. */
	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Sum per-cpu rule counters into the caller-supplied array, using the
 * xt_recseq seqcount so each bcnt/pcnt pair is read consistently.
 */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}

/* Allocate a zeroed counter array and fill it from the live table. */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

/* Copy the ruleset to userspace, then patch in fresh counters and the
 * targets' user-visible names (the kernel copy holds pointers there).
 */
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
/* Translate a standard verdict read from a compat blob: positive
 * verdicts are jump offsets and must be converted to native offsets.
 */
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NFPROTO_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

/* Inverse of compat_standard_from_user for the GET path. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

/* Record the native->compat size delta for one entry and shrink the
 * advertised size and hook offsets accordingly.
 */
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

/* Build a compat-sized view of a native table's metadata. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct arpt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(NFPROTO_ARP, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

/* ARPT_SO_GET_INFO handler: copy table metadata to userspace. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ?
	      PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}

/* ARPT_SO_GET_ENTRIES handler: copy the whole ruleset to userspace. */
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size) {
		duprintf("get_entries: %u != %Zu\n", *len,
			 sizeof(struct arpt_get_entries) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;

		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table was replaced meanwhile; let the caller retry. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

/* Swap in a fully-translated replacement table, hand the old counters
 * back to userspace and release the old table's resources.
 */
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

/* ARPT_SO_SET_REPLACE handler: copy in, translate and install a new
 * ruleset supplied by userspace.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX /
	    sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("arp_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* ARPT_SO_SET_ADD_COUNTERS handler; accepts both native and compat
 * counter-info layouts depending on "compat".
 */
static int do_add_counters(struct net *net, const void __user *user,
			   unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct arpt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
/* Drop the module reference taken for a compat entry's target. */
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
	struct xt_entry_target *t;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
}

/* Validate one compat-layout entry and register its native/compat size
 * delta with the xt compat-offset machinery.
 */
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct
compat_arpt_entry) >= limit) { 1230 duprintf("Bad offset %p, limit = %p\n", e, limit); 1231 return -EINVAL; 1232 } 1233 1234 if (e->next_offset < sizeof(struct compat_arpt_entry) + 1235 sizeof(struct compat_xt_entry_target)) { 1236 duprintf("checking: element %p size %u\n", 1237 e, e->next_offset); 1238 return -EINVAL; 1239 } 1240 1241 /* For purposes of check_entry casting the compat entry is fine */ 1242 ret = check_entry((struct arpt_entry *)e, name); 1243 if (ret) 1244 return ret; 1245 1246 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1247 entry_offset = (void *)e - (void *)base; 1248 1249 t = compat_arpt_get_target(e); 1250 target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, 1251 t->u.user.revision); 1252 if (IS_ERR(target)) { 1253 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", 1254 t->u.user.name); 1255 ret = PTR_ERR(target); 1256 goto out; 1257 } 1258 t->u.kernel.target = target; 1259 1260 off += xt_compat_target_offset(target); 1261 *size += off; 1262 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); 1263 if (ret) 1264 goto release_target; 1265 1266 /* Check hooks & underflows */ 1267 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 1268 if ((unsigned char *)e - base == hook_entries[h]) 1269 newinfo->hook_entry[h] = hook_entries[h]; 1270 if ((unsigned char *)e - base == underflows[h]) 1271 newinfo->underflow[h] = underflows[h]; 1272 } 1273 1274 /* Clear counters and comefrom */ 1275 memset(&e->counters, 0, sizeof(e->counters)); 1276 e->comefrom = 0; 1277 return 0; 1278 1279release_target: 1280 module_put(t->u.kernel.target->me); 1281out: 1282 return ret; 1283} 1284 1285static int 1286compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, 1287 unsigned int *size, const char *name, 1288 struct xt_table_info *newinfo, unsigned char *base) 1289{ 1290 struct xt_entry_target *t; 1291 struct xt_target *target; 1292 struct arpt_entry *de; 1293 unsigned int origsize; 1294 int ret, h; 1295 1296 ret = 
0; 1297 origsize = *size; 1298 de = (struct arpt_entry *)*dstptr; 1299 memcpy(de, e, sizeof(struct arpt_entry)); 1300 memcpy(&de->counters, &e->counters, sizeof(e->counters)); 1301 1302 *dstptr += sizeof(struct arpt_entry); 1303 *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1304 1305 de->target_offset = e->target_offset - (origsize - *size); 1306 t = compat_arpt_get_target(e); 1307 target = t->u.kernel.target; 1308 xt_compat_target_from_user(t, dstptr, size); 1309 1310 de->next_offset = e->next_offset - (origsize - *size); 1311 for (h = 0; h < NF_ARP_NUMHOOKS; h++) { 1312 if ((unsigned char *)de - base < newinfo->hook_entry[h]) 1313 newinfo->hook_entry[h] -= origsize - *size; 1314 if ((unsigned char *)de - base < newinfo->underflow[h]) 1315 newinfo->underflow[h] -= origsize - *size; 1316 } 1317 return ret; 1318} 1319 1320static int translate_compat_table(const char *name, 1321 unsigned int valid_hooks, 1322 struct xt_table_info **pinfo, 1323 void **pentry0, 1324 unsigned int total_size, 1325 unsigned int number, 1326 unsigned int *hook_entries, 1327 unsigned int *underflows) 1328{ 1329 unsigned int i, j; 1330 struct xt_table_info *newinfo, *info; 1331 void *pos, *entry0, *entry1; 1332 struct compat_arpt_entry *iter0; 1333 struct arpt_entry *iter1; 1334 unsigned int size; 1335 int ret = 0; 1336 1337 info = *pinfo; 1338 entry0 = *pentry0; 1339 size = total_size; 1340 info->number = number; 1341 1342 /* Init all hooks to impossible value. */ 1343 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1344 info->hook_entry[i] = 0xFFFFFFFF; 1345 info->underflow[i] = 0xFFFFFFFF; 1346 } 1347 1348 duprintf("translate_compat_table: size %u\n", info->size); 1349 j = 0; 1350 xt_compat_lock(NFPROTO_ARP); 1351 xt_compat_init_offsets(NFPROTO_ARP, number); 1352 /* Walk through entries, checking offsets. 
*/ 1353 xt_entry_foreach(iter0, entry0, total_size) { 1354 ret = check_compat_entry_size_and_hooks(iter0, info, &size, 1355 entry0, 1356 entry0 + total_size, 1357 hook_entries, 1358 underflows, 1359 name); 1360 if (ret != 0) 1361 goto out_unlock; 1362 ++j; 1363 } 1364 1365 ret = -EINVAL; 1366 if (j != number) { 1367 duprintf("translate_compat_table: %u not %u entries\n", 1368 j, number); 1369 goto out_unlock; 1370 } 1371 1372 /* Check hooks all assigned */ 1373 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1374 /* Only hooks which are valid */ 1375 if (!(valid_hooks & (1 << i))) 1376 continue; 1377 if (info->hook_entry[i] == 0xFFFFFFFF) { 1378 duprintf("Invalid hook entry %u %u\n", 1379 i, hook_entries[i]); 1380 goto out_unlock; 1381 } 1382 if (info->underflow[i] == 0xFFFFFFFF) { 1383 duprintf("Invalid underflow %u %u\n", 1384 i, underflows[i]); 1385 goto out_unlock; 1386 } 1387 } 1388 1389 ret = -ENOMEM; 1390 newinfo = xt_alloc_table_info(size); 1391 if (!newinfo) 1392 goto out_unlock; 1393 1394 newinfo->number = number; 1395 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1396 newinfo->hook_entry[i] = info->hook_entry[i]; 1397 newinfo->underflow[i] = info->underflow[i]; 1398 } 1399 entry1 = newinfo->entries[raw_smp_processor_id()]; 1400 pos = entry1; 1401 size = total_size; 1402 xt_entry_foreach(iter0, entry0, total_size) { 1403 ret = compat_copy_entry_from_user(iter0, &pos, &size, 1404 name, newinfo, entry1); 1405 if (ret != 0) 1406 break; 1407 } 1408 xt_compat_flush_offsets(NFPROTO_ARP); 1409 xt_compat_unlock(NFPROTO_ARP); 1410 if (ret) 1411 goto free_newinfo; 1412 1413 ret = -ELOOP; 1414 if (!mark_source_chains(newinfo, valid_hooks, entry1)) 1415 goto free_newinfo; 1416 1417 i = 0; 1418 xt_entry_foreach(iter1, entry1, newinfo->size) { 1419 ret = check_target(iter1, name); 1420 if (ret != 0) 1421 break; 1422 ++i; 1423 if (strcmp(arpt_get_target(iter1)->u.user.name, 1424 XT_ERROR_TARGET) == 0) 1425 ++newinfo->stacksize; 1426 } 1427 if (ret) { 1428 /* 1429 * The first i 
matches need cleanup_entry (calls ->destroy) 1430 * because they had called ->check already. The other j-i 1431 * entries need only release. 1432 */ 1433 int skip = i; 1434 j -= i; 1435 xt_entry_foreach(iter0, entry0, newinfo->size) { 1436 if (skip-- > 0) 1437 continue; 1438 if (j-- == 0) 1439 break; 1440 compat_release_entry(iter0); 1441 } 1442 xt_entry_foreach(iter1, entry1, newinfo->size) { 1443 if (i-- == 0) 1444 break; 1445 cleanup_entry(iter1); 1446 } 1447 xt_free_table_info(newinfo); 1448 return ret; 1449 } 1450 1451 /* And one copy for every other CPU */ 1452 for_each_possible_cpu(i) 1453 if (newinfo->entries[i] && newinfo->entries[i] != entry1) 1454 memcpy(newinfo->entries[i], entry1, newinfo->size); 1455 1456 *pinfo = newinfo; 1457 *pentry0 = entry1; 1458 xt_free_table_info(info); 1459 return 0; 1460 1461free_newinfo: 1462 xt_free_table_info(newinfo); 1463out: 1464 xt_entry_foreach(iter0, entry0, total_size) { 1465 if (j-- == 0) 1466 break; 1467 compat_release_entry(iter0); 1468 } 1469 return ret; 1470out_unlock: 1471 xt_compat_flush_offsets(NFPROTO_ARP); 1472 xt_compat_unlock(NFPROTO_ARP); 1473 goto out; 1474} 1475 1476struct compat_arpt_replace { 1477 char name[XT_TABLE_MAXNAMELEN]; 1478 u32 valid_hooks; 1479 u32 num_entries; 1480 u32 size; 1481 u32 hook_entry[NF_ARP_NUMHOOKS]; 1482 u32 underflow[NF_ARP_NUMHOOKS]; 1483 u32 num_counters; 1484 compat_uptr_t counters; 1485 struct compat_arpt_entry entries[0]; 1486}; 1487 1488static int compat_do_replace(struct net *net, void __user *user, 1489 unsigned int len) 1490{ 1491 int ret; 1492 struct compat_arpt_replace tmp; 1493 struct xt_table_info *newinfo; 1494 void *loc_cpu_entry; 1495 struct arpt_entry *iter; 1496 1497 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1498 return -EFAULT; 1499 1500 /* overflow check */ 1501 if (tmp.size >= INT_MAX / num_possible_cpus()) 1502 return -ENOMEM; 1503 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) 1504 return -ENOMEM; 1505 if (tmp.num_counters == 0) 
1506 return -EINVAL; 1507 1508 tmp.name[sizeof(tmp.name)-1] = 0; 1509 1510 newinfo = xt_alloc_table_info(tmp.size); 1511 if (!newinfo) 1512 return -ENOMEM; 1513 1514 /* choose the copy that is on our node/cpu */ 1515 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1516 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { 1517 ret = -EFAULT; 1518 goto free_newinfo; 1519 } 1520 1521 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1522 &newinfo, &loc_cpu_entry, tmp.size, 1523 tmp.num_entries, tmp.hook_entry, 1524 tmp.underflow); 1525 if (ret != 0) 1526 goto free_newinfo; 1527 1528 duprintf("compat_do_replace: Translated table\n"); 1529 1530 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, 1531 tmp.num_counters, compat_ptr(tmp.counters)); 1532 if (ret) 1533 goto free_newinfo_untrans; 1534 return 0; 1535 1536 free_newinfo_untrans: 1537 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) 1538 cleanup_entry(iter); 1539 free_newinfo: 1540 xt_free_table_info(newinfo); 1541 return ret; 1542} 1543 1544static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, 1545 unsigned int len) 1546{ 1547 int ret; 1548 1549 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 1550 return -EPERM; 1551 1552 switch (cmd) { 1553 case ARPT_SO_SET_REPLACE: 1554 ret = compat_do_replace(sock_net(sk), user, len); 1555 break; 1556 1557 case ARPT_SO_SET_ADD_COUNTERS: 1558 ret = do_add_counters(sock_net(sk), user, len, 1); 1559 break; 1560 1561 default: 1562 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); 1563 ret = -EINVAL; 1564 } 1565 1566 return ret; 1567} 1568 1569static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, 1570 compat_uint_t *size, 1571 struct xt_counters *counters, 1572 unsigned int i) 1573{ 1574 struct xt_entry_target *t; 1575 struct compat_arpt_entry __user *ce; 1576 u_int16_t target_offset, next_offset; 1577 compat_uint_t origsize; 1578 int ret; 1579 1580 origsize = *size; 1581 
ce = (struct compat_arpt_entry __user *)*dstptr; 1582 if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || 1583 copy_to_user(&ce->counters, &counters[i], 1584 sizeof(counters[i])) != 0) 1585 return -EFAULT; 1586 1587 *dstptr += sizeof(struct compat_arpt_entry); 1588 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1589 1590 target_offset = e->target_offset - (origsize - *size); 1591 1592 t = arpt_get_target(e); 1593 ret = xt_compat_target_to_user(t, dstptr, size); 1594 if (ret) 1595 return ret; 1596 next_offset = e->next_offset - (origsize - *size); 1597 if (put_user(target_offset, &ce->target_offset) != 0 || 1598 put_user(next_offset, &ce->next_offset) != 0) 1599 return -EFAULT; 1600 return 0; 1601} 1602 1603static int compat_copy_entries_to_user(unsigned int total_size, 1604 struct xt_table *table, 1605 void __user *userptr) 1606{ 1607 struct xt_counters *counters; 1608 const struct xt_table_info *private = table->private; 1609 void __user *pos; 1610 unsigned int size; 1611 int ret = 0; 1612 void *loc_cpu_entry; 1613 unsigned int i = 0; 1614 struct arpt_entry *iter; 1615 1616 counters = alloc_counters(table); 1617 if (IS_ERR(counters)) 1618 return PTR_ERR(counters); 1619 1620 /* choose the copy on our node/cpu */ 1621 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1622 pos = userptr; 1623 size = total_size; 1624 xt_entry_foreach(iter, loc_cpu_entry, total_size) { 1625 ret = compat_copy_entry_to_user(iter, &pos, 1626 &size, counters, i++); 1627 if (ret != 0) 1628 break; 1629 } 1630 vfree(counters); 1631 return ret; 1632} 1633 1634struct compat_arpt_get_entries { 1635 char name[XT_TABLE_MAXNAMELEN]; 1636 compat_uint_t size; 1637 struct compat_arpt_entry entrytable[0]; 1638}; 1639 1640static int compat_get_entries(struct net *net, 1641 struct compat_arpt_get_entries __user *uptr, 1642 int *len) 1643{ 1644 int ret; 1645 struct compat_arpt_get_entries get; 1646 struct xt_table *t; 1647 1648 if (*len < sizeof(get)) { 1649 
duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); 1650 return -EINVAL; 1651 } 1652 if (copy_from_user(&get, uptr, sizeof(get)) != 0) 1653 return -EFAULT; 1654 if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { 1655 duprintf("compat_get_entries: %u != %zu\n", 1656 *len, sizeof(get) + get.size); 1657 return -EINVAL; 1658 } 1659 1660 xt_compat_lock(NFPROTO_ARP); 1661 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 1662 if (!IS_ERR_OR_NULL(t)) { 1663 const struct xt_table_info *private = t->private; 1664 struct xt_table_info info; 1665 1666 duprintf("t->private->number = %u\n", private->number); 1667 ret = compat_table_info(private, &info); 1668 if (!ret && get.size == info.size) { 1669 ret = compat_copy_entries_to_user(private->size, 1670 t, uptr->entrytable); 1671 } else if (!ret) { 1672 duprintf("compat_get_entries: I've got %u not %u!\n", 1673 private->size, get.size); 1674 ret = -EAGAIN; 1675 } 1676 xt_compat_flush_offsets(NFPROTO_ARP); 1677 module_put(t->me); 1678 xt_table_unlock(t); 1679 } else 1680 ret = t ? 
PTR_ERR(t) : -ENOENT; 1681 1682 xt_compat_unlock(NFPROTO_ARP); 1683 return ret; 1684} 1685 1686static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); 1687 1688static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, 1689 int *len) 1690{ 1691 int ret; 1692 1693 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 1694 return -EPERM; 1695 1696 switch (cmd) { 1697 case ARPT_SO_GET_INFO: 1698 ret = get_info(sock_net(sk), user, len, 1); 1699 break; 1700 case ARPT_SO_GET_ENTRIES: 1701 ret = compat_get_entries(sock_net(sk), user, len); 1702 break; 1703 default: 1704 ret = do_arpt_get_ctl(sk, cmd, user, len); 1705 } 1706 return ret; 1707} 1708#endif 1709 1710static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 1711{ 1712 int ret; 1713 1714 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 1715 return -EPERM; 1716 1717 switch (cmd) { 1718 case ARPT_SO_SET_REPLACE: 1719 ret = do_replace(sock_net(sk), user, len); 1720 break; 1721 1722 case ARPT_SO_SET_ADD_COUNTERS: 1723 ret = do_add_counters(sock_net(sk), user, len, 0); 1724 break; 1725 1726 default: 1727 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); 1728 ret = -EINVAL; 1729 } 1730 1731 return ret; 1732} 1733 1734static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 1735{ 1736 int ret; 1737 1738 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 1739 return -EPERM; 1740 1741 switch (cmd) { 1742 case ARPT_SO_GET_INFO: 1743 ret = get_info(sock_net(sk), user, len, 0); 1744 break; 1745 1746 case ARPT_SO_GET_ENTRIES: 1747 ret = get_entries(sock_net(sk), user, len); 1748 break; 1749 1750 case ARPT_SO_GET_REVISION_TARGET: { 1751 struct xt_get_revision rev; 1752 1753 if (*len != sizeof(rev)) { 1754 ret = -EINVAL; 1755 break; 1756 } 1757 if (copy_from_user(&rev, user, sizeof(rev)) != 0) { 1758 ret = -EFAULT; 1759 break; 1760 } 1761 rev.name[sizeof(rev.name)-1] = 0; 1762 1763 
try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, 1764 rev.revision, 1, &ret), 1765 "arpt_%s", rev.name); 1766 break; 1767 } 1768 1769 default: 1770 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); 1771 ret = -EINVAL; 1772 } 1773 1774 return ret; 1775} 1776 1777struct xt_table *arpt_register_table(struct net *net, 1778 const struct xt_table *table, 1779 const struct arpt_replace *repl) 1780{ 1781 int ret; 1782 struct xt_table_info *newinfo; 1783 struct xt_table_info bootstrap = {0}; 1784 void *loc_cpu_entry; 1785 struct xt_table *new_table; 1786 1787 newinfo = xt_alloc_table_info(repl->size); 1788 if (!newinfo) { 1789 ret = -ENOMEM; 1790 goto out; 1791 } 1792 1793 /* choose the copy on our node/cpu */ 1794 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1795 memcpy(loc_cpu_entry, repl->entries, repl->size); 1796 1797 ret = translate_table(newinfo, loc_cpu_entry, repl); 1798 duprintf("arpt_register_table: translate table gives %d\n", ret); 1799 if (ret != 0) 1800 goto out_free; 1801 1802 new_table = xt_register_table(net, table, &bootstrap, newinfo); 1803 if (IS_ERR(new_table)) { 1804 ret = PTR_ERR(new_table); 1805 goto out_free; 1806 } 1807 return new_table; 1808 1809out_free: 1810 xt_free_table_info(newinfo); 1811out: 1812 return ERR_PTR(ret); 1813} 1814 1815void arpt_unregister_table(struct xt_table *table) 1816{ 1817 struct xt_table_info *private; 1818 void *loc_cpu_entry; 1819 struct module *table_owner = table->me; 1820 struct arpt_entry *iter; 1821 1822 private = xt_unregister_table(table); 1823 1824 /* Decrease module usage counts and free resources */ 1825 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1826 xt_entry_foreach(iter, loc_cpu_entry, private->size) 1827 cleanup_entry(iter); 1828 if (private->number > private->initial_entries) 1829 module_put(table_owner); 1830 xt_free_table_info(private); 1831} 1832 1833/* The built-in targets: standard (NULL) and error. 
*/ 1834static struct xt_target arpt_builtin_tg[] __read_mostly = { 1835 { 1836 .name = XT_STANDARD_TARGET, 1837 .targetsize = sizeof(int), 1838 .family = NFPROTO_ARP, 1839#ifdef CONFIG_COMPAT 1840 .compatsize = sizeof(compat_int_t), 1841 .compat_from_user = compat_standard_from_user, 1842 .compat_to_user = compat_standard_to_user, 1843#endif 1844 }, 1845 { 1846 .name = XT_ERROR_TARGET, 1847 .target = arpt_error, 1848 .targetsize = XT_FUNCTION_MAXNAMELEN, 1849 .family = NFPROTO_ARP, 1850 }, 1851}; 1852 1853static struct nf_sockopt_ops arpt_sockopts = { 1854 .pf = PF_INET, 1855 .set_optmin = ARPT_BASE_CTL, 1856 .set_optmax = ARPT_SO_SET_MAX+1, 1857 .set = do_arpt_set_ctl, 1858#ifdef CONFIG_COMPAT 1859 .compat_set = compat_do_arpt_set_ctl, 1860#endif 1861 .get_optmin = ARPT_BASE_CTL, 1862 .get_optmax = ARPT_SO_GET_MAX+1, 1863 .get = do_arpt_get_ctl, 1864#ifdef CONFIG_COMPAT 1865 .compat_get = compat_do_arpt_get_ctl, 1866#endif 1867 .owner = THIS_MODULE, 1868}; 1869 1870static int __net_init arp_tables_net_init(struct net *net) 1871{ 1872 return xt_proto_init(net, NFPROTO_ARP); 1873} 1874 1875static void __net_exit arp_tables_net_exit(struct net *net) 1876{ 1877 xt_proto_fini(net, NFPROTO_ARP); 1878} 1879 1880static struct pernet_operations arp_tables_net_ops = { 1881 .init = arp_tables_net_init, 1882 .exit = arp_tables_net_exit, 1883}; 1884 1885static int __init arp_tables_init(void) 1886{ 1887 int ret; 1888 1889 ret = register_pernet_subsys(&arp_tables_net_ops); 1890 if (ret < 0) 1891 goto err1; 1892 1893 /* No one else will be downing sem now, so we won't sleep */ 1894 ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); 1895 if (ret < 0) 1896 goto err2; 1897 1898 /* Register setsockopt */ 1899 ret = nf_register_sockopt(&arpt_sockopts); 1900 if (ret < 0) 1901 goto err4; 1902 1903 printk(KERN_INFO "arp_tables: (C) 2002 David S. 
Miller\n"); 1904 return 0; 1905 1906err4: 1907 xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); 1908err2: 1909 unregister_pernet_subsys(&arp_tables_net_ops); 1910err1: 1911 return ret; 1912} 1913 1914static void __exit arp_tables_fini(void) 1915{ 1916 nf_unregister_sockopt(&arpt_sockopts); 1917 xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); 1918 unregister_pernet_subsys(&arp_tables_net_ops); 1919} 1920 1921EXPORT_SYMBOL(arpt_register_table); 1922EXPORT_SYMBOL(arpt_unregister_table); 1923EXPORT_SYMBOL(arpt_do_table); 1924 1925module_init(arp_tables_init); 1926module_exit(arp_tables_fini); 1927