fold 98 drivers/net/ethernet/freescale/enetc/enetc_pf.c u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
fold 107 drivers/net/ethernet/freescale/enetc/enetc_pf.c res |= (hweight64(fold & (mask << i)) & 0x1) << i;
fold 318 include/linux/etherdevice.h u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
fold 321 include/linux/etherdevice.h return fold == 0;
fold 348 include/linux/etherdevice.h u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
fold 351 include/linux/etherdevice.h return (fold >> 16) == 0;
fold 353 include/linux/etherdevice.h return (fold << 16) == 0;
fold 501 include/linux/etherdevice.h unsigned long fold;
fold 511 include/linux/etherdevice.h fold = *(unsigned long *)a ^ *(unsigned long *)b;
fold 512 include/linux/etherdevice.h fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
fold 513 include/linux/etherdevice.h return fold;
fold 7291 kernel/bpf/verifier.c struct bpf_func_state *fold, *fcur;
fold 7297 kernel/bpf/verifier.c fold = old->frame[fr];
fold 7300 kernel/bpf/verifier.c if (memcmp(&fold->regs[i], &fcur->regs[i],
fold 178 net/sched/cls_basic.c struct basic_filter *fold = (struct basic_filter *) *arg;
fold 189 net/sched/cls_basic.c if (fold != NULL) {
fold 190 net/sched/cls_basic.c if (handle && fold->handle != handle)
fold 206 net/sched/cls_basic.c } else if (!fold) {
fold 222 net/sched/cls_basic.c if (!fold)
fold 229 net/sched/cls_basic.c if (fold) {
fold 231 net/sched/cls_basic.c list_replace_rcu(&fold->link, &fnew->link);
fold 232 net/sched/cls_basic.c tcf_unbind_filter(tp, &fold->res);
fold 233 net/sched/cls_basic.c tcf_exts_get_net(&fold->exts);
fold 234 net/sched/cls_basic.c tcf_queue_work(&fold->rwork, basic_delete_filter_work);
fold 394 net/sched/cls_flow.c struct flow_filter *fold, *fnew;
fold 450 net/sched/cls_flow.c fold = *arg;
fold 451 net/sched/cls_flow.c if (fold) {
fold 453 net/sched/cls_flow.c if (fold->handle != handle && handle)
fold 457 net/sched/cls_flow.c fnew->tp = fold->tp;
fold 458 net/sched/cls_flow.c fnew->handle = fold->handle;
fold 459 net/sched/cls_flow.c fnew->nkeys = fold->nkeys;
fold 460 net/sched/cls_flow.c fnew->keymask = fold->keymask;
fold 461 net/sched/cls_flow.c fnew->mode = fold->mode;
fold 462 net/sched/cls_flow.c fnew->mask = fold->mask;
fold 463 net/sched/cls_flow.c fnew->xor = fold->xor;
fold 464 net/sched/cls_flow.c fnew->rshift = fold->rshift;
fold 465 net/sched/cls_flow.c fnew->addend = fold->addend;
fold 466 net/sched/cls_flow.c fnew->divisor = fold->divisor;
fold 467 net/sched/cls_flow.c fnew->baseclass = fold->baseclass;
fold 468 net/sched/cls_flow.c fnew->hashrnd = fold->hashrnd;
fold 470 net/sched/cls_flow.c mode = fold->mode;
fold 477 net/sched/cls_flow.c perturb_period = fold->perturb_period;
fold 548 net/sched/cls_flow.c list_replace_rcu(&fold->list, &fnew->list);
fold 552 net/sched/cls_flow.c if (fold) {
fold 553 net/sched/cls_flow.c tcf_exts_get_net(&fold->exts);
fold 554 net/sched/cls_flow.c tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
fold 1420 net/sched/cls_flower.c struct cls_fl_filter *fold,
fold 1438 net/sched/cls_flower.c if (fold) {
fold 1453 net/sched/cls_flower.c } else if (fold && fold->mask != fnew->mask) {
fold 1507 net/sched/cls_flower.c struct cls_fl_filter *fold,
fold 1521 net/sched/cls_flower.c return fold && err == -EEXIST ? 0 : err;
fold 1535 net/sched/cls_flower.c struct cls_fl_filter *fold = *arg;
fold 1564 net/sched/cls_flower.c if (fold && handle && fold->handle != handle) {
fold 1595 net/sched/cls_flower.c err = fl_check_assign_mask(head, fnew, fold, mask);
fold 1599 net/sched/cls_flower.c err = fl_ht_insert_unique(fnew, fold, &in_ht);
fold 1622 net/sched/cls_flower.c if (fold) {
fold 1624 net/sched/cls_flower.c if (fold->deleted) {
fold 1644 net/sched/cls_flower.c rhashtable_remove_fast(&fold->mask->ht,
fold 1645 net/sched/cls_flower.c &fold->ht_node,
fold 1646 net/sched/cls_flower.c fold->mask->filter_ht_params);
fold 1648 net/sched/cls_flower.c list_replace_rcu(&fold->list, &fnew->list);
fold 1649 net/sched/cls_flower.c fold->deleted = true;
fold 1653 net/sched/cls_flower.c fl_mask_put(head, fold->mask);
fold 1654 net/sched/cls_flower.c if (!tc_skip_hw(fold->flags))
fold 1655 net/sched/cls_flower.c fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
fold 1656 net/sched/cls_flower.c tcf_unbind_filter(tp, &fold->res);
fold 1660 net/sched/cls_flower.c refcount_dec(&fold->refcnt);
fold 1661 net/sched/cls_flower.c __fl_put(fold);
fold 1715 net/sched/cls_flower.c if (fold)
fold 1716 net/sched/cls_flower.c __fl_put(fold);
fold 472 net/sched/cls_route.c struct route4_filter *fold, *f1, *pfp, *f = NULL;
fold 488 net/sched/cls_route.c fold = *arg;
fold 489 net/sched/cls_route.c if (fold && handle && fold->handle != handle)
fold 501 net/sched/cls_route.c if (fold) {
fold 502 net/sched/cls_route.c f->id = fold->id;
fold 503 net/sched/cls_route.c f->iif = fold->iif;
fold 504 net/sched/cls_route.c f->res = fold->res;
fold 505 net/sched/cls_route.c f->handle = fold->handle;
fold 507 net/sched/cls_route.c f->tp = fold->tp;
fold 508 net/sched/cls_route.c f->bkt = fold->bkt;
fold 529 net/sched/cls_route.c if (fold && fold->handle && f->handle != fold->handle) {
fold 530 net/sched/cls_route.c th = to_hash(fold->handle);
fold 531 net/sched/cls_route.c h = from_hash(fold->handle >> 16);
fold 537 net/sched/cls_route.c if (pfp == fold) {
fold 538 net/sched/cls_route.c rcu_assign_pointer(*fp, fold->next);
fold 547 net/sched/cls_route.c if (fold) {
fold 548 net/sched/cls_route.c tcf_unbind_filter(tp, &fold->res);
fold 549 net/sched/cls_route.c tcf_exts_get_net(&fold->exts);
fold 550 net/sched/cls_route.c tcf_queue_work(&fold->rwork, route4_delete_filter_work);
fold 1022 net/wireless/scan.c u8 fold = 0;
fold 1037 net/wireless/scan.c fold |= ie[2 + i];
fold 1039 net/wireless/scan.c if (fold) {
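
The include/linux/etherdevice.h hits above (lines 318-353 and 501-513) are the XOR-fold comparison idiom: XOR the two Ethernet addresses (or headers) together so that equality reduces to testing the folded word for zero, with a 64-bit load's two bytes of overrun masked off by a shift. Below is a minimal standalone sketch of the 32-bit variant; it uses memcpy for portable unaligned loads instead of the kernel's raw pointer casts, and the function name eth_addr_equal_fold is illustrative rather than the kernel's.

/*
 * Sketch of the XOR-"fold" idiom from include/linux/etherdevice.h:
 * XOR the two 6-byte addresses and test the folded result for zero.
 * The kernel reads them as one u32 + one u16 (or one u64, shifting
 * away the 16 bits of overrun); this portable sketch uses memcpy
 * instead of the kernel's unaligned-access casts.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int eth_addr_equal_fold(const uint8_t addr1[6], const uint8_t addr2[6])
{
	uint32_t a1, a2;
	uint16_t b1, b2;

	memcpy(&a1, addr1, 4);
	memcpy(&a2, addr2, 4);
	memcpy(&b1, addr1 + 4, 2);
	memcpy(&b2, addr2 + 4, 2);

	/* Fold all differing bits into one word: zero iff equal. */
	return ((a1 ^ a2) | (uint32_t)(b1 ^ b2)) == 0;
}

int main(void)
{
	const uint8_t mac1[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t mac2[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	printf("%d %d\n", eth_addr_equal_fold(mac1, mac1),
	       eth_addr_equal_fold(mac1, mac2)); /* prints "1 0" */
	return 0;
}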
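
The enetc_pf.c hits (lines 98 and 107) fold a 48-bit MAC address down to a 6-bit hash index: each result bit is the parity (hweight64(...) & 0x1) of every sixth bit of the byte-reversed address. The sketch below reproduces that arithmetic in userspace, substituting the GCC/Clang builtins __builtin_bswap64 and __builtin_popcountll for the kernel's __swab64 and hweight64; mac_to_u64 and mac_hash_idx are illustrative names.

#include <stdint.h>
#include <stdio.h>

/* Big-endian packing of a MAC, as the kernel's ether_addr_to_u64() does. */
static uint64_t mac_to_u64(const uint8_t addr[6])
{
	uint64_t u = 0;

	for (int i = 0; i < 6; i++)
		u = (u << 8) | addr[i];
	return u;
}

static uint64_t mac_hash_idx(const uint8_t addr[6])
{
	/* Byte-reverse, then drop the two empty bytes: 48 address bits left. */
	uint64_t fold = __builtin_bswap64(mac_to_u64(addr)) >> 16;
	uint64_t mask = 0, res = 0;

	/* mask selects bits 0, 6, 12, ..., 42. */
	for (int i = 0; i < 8; i++)
		mask |= 1ULL << (i * 6);

	/* Result bit i = parity of fold's bits i, i+6, ..., i+42. */
	for (int i = 0; i < 6; i++)
		res |= (uint64_t)(__builtin_popcountll(fold & (mask << i)) & 1) << i;
	return res;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("hash index: %llu\n", (unsigned long long)mac_hash_idx(mac));
	return 0;
}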
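
The net/sched hits (cls_basic.c, cls_flow.c, cls_flower.c, cls_route.c) all use fold for the old filter during a change operation: the replacement fnew is fully constructed first, swapped in with list_replace_rcu(), and only then is fold unbound and queued for destruction via tcf_queue_work(). Below is a plain userspace sketch of that replace-then-release shape using an ordinary linked list and an immediate free(); it deliberately omits the RCU and deferred-work machinery, and every name in it is illustrative.

/*
 * Userspace sketch of the fold/fnew pattern shared by the tc
 * classifiers above: "fold" is the old filter being replaced,
 * "fnew" the fully built replacement. The new filter is linked in
 * where the old one was, and only then is the old one released
 * (the kernel defers the free through tcf_queue_work()).
 */
#include <stdio.h>
#include <stdlib.h>

struct filter {
	unsigned int handle;
	int classid;		/* stand-in for the filter's payload */
	struct filter *next;
};

/* Replace the list entry matching fnew->handle, or prepend if none. */
static void filter_change(struct filter **head, struct filter *fnew)
{
	struct filter **fp, *fold = NULL;

	for (fp = head; *fp; fp = &(*fp)->next) {
		if ((*fp)->handle == fnew->handle) {
			fold = *fp;
			break;
		}
	}

	if (fold) {
		fnew->next = fold->next;	/* link fnew where fold was */
		*fp = fnew;
		free(fold);			/* kernel: deferred via tcf_queue_work() */
	} else {
		fnew->next = *head;
		*head = fnew;
	}
}

int main(void)
{
	struct filter *head = NULL;
	struct filter *a = calloc(1, sizeof(*a));
	struct filter *b = calloc(1, sizeof(*b));

	a->handle = 1; a->classid = 10;
	filter_change(&head, a);

	b->handle = 1; b->classid = 20;	/* same handle: replaces a */
	filter_change(&head, b);

	printf("handle %u -> classid %d\n", head->handle, head->classid);
	free(head);
	return 0;
}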
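
Finally, the net/wireless/scan.c hits (lines 1022-1039) use fold as an OR-accumulator: every byte of a scanned SSID element is OR-ed into fold, so a nonzero result means the SSID contains at least one nonzero byte (the check cfg80211 uses to spot all-zero, i.e. hidden, SSIDs). A small sketch of the same pattern, with ssid_is_all_zero as an illustrative name rather than the kernel's interface:

/*
 * Sketch of the OR-"fold" accumulator from net/wireless/scan.c:
 * OR every byte together; any set bit anywhere survives, so a
 * zero fold means the buffer was all zeroes.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static int ssid_is_all_zero(const uint8_t *ssid, size_t ssid_len)
{
	uint8_t fold = 0;
	size_t i;

	for (i = 0; i < ssid_len; i++)
		fold |= ssid[i];

	return fold == 0;
}

int main(void)
{
	const uint8_t hidden[4] = { 0, 0, 0, 0 };
	const uint8_t visible[4] = { 'h', 'o', 'm', 'e' };

	printf("%d %d\n", ssid_is_all_zero(hidden, 4),
	       ssid_is_all_zero(visible, 4)); /* prints "1 0" */
	return 0;
}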