newmask   44 arch/alpha/kernel/signal.c SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
newmask   50 arch/alpha/kernel/signal.c siginitset(&mask, newmask & _BLOCKABLE);
newmask  333 arch/x86/kernel/cpu/resctrl/rdtgroup.c static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
newmask  340 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
newmask  347 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
newmask  358 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
newmask  371 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_copy(&rdtgrp->cpu_mask, newmask);
newmask  386 arch/x86/kernel/cpu/resctrl/rdtgroup.c static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
newmask  393 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
newmask  412 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
newmask  425 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_copy(&rdtgrp->cpu_mask, newmask);
newmask  444 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_var_t tmpmask, newmask, tmpmask1;
newmask  453 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
newmask  459 arch/x86/kernel/cpu/resctrl/rdtgroup.c free_cpumask_var(newmask);
newmask  477 arch/x86/kernel/cpu/resctrl/rdtgroup.c ret = cpulist_parse(buf, newmask);
newmask  479 arch/x86/kernel/cpu/resctrl/rdtgroup.c ret = cpumask_parse(buf, newmask);
newmask  487 arch/x86/kernel/cpu/resctrl/rdtgroup.c cpumask_andnot(tmpmask, newmask, cpu_online_mask);
newmask  495 arch/x86/kernel/cpu/resctrl/rdtgroup.c ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
newmask  497 arch/x86/kernel/cpu/resctrl/rdtgroup.c ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
newmask  504 arch/x86/kernel/cpu/resctrl/rdtgroup.c free_cpumask_var(newmask);
newmask  264 drivers/mmc/host/s3cmci.c u32 newmask;
newmask  266 drivers/mmc/host/s3cmci.c newmask = readl(host->base + host->sdiimsk);
newmask  267 drivers/mmc/host/s3cmci.c newmask |= imask;
newmask  269 drivers/mmc/host/s3cmci.c writel(newmask, host->base + host->sdiimsk);
newmask  271 drivers/mmc/host/s3cmci.c return newmask;
newmask  276 drivers/mmc/host/s3cmci.c u32 newmask;
newmask  278 drivers/mmc/host/s3cmci.c newmask = readl(host->base + host->sdiimsk);
newmask  279 drivers/mmc/host/s3cmci.c newmask &= ~imask;
newmask  281 drivers/mmc/host/s3cmci.c writel(newmask, host->base + host->sdiimsk);
newmask  283 drivers/mmc/host/s3cmci.c return newmask;
newmask 1139 include/linux/syscalls.h asmlinkage long sys_ssetmask(int newmask);
newmask 1120 kernel/cgroup/cpuset.c struct cpumask *newmask,
newmask 1136 kernel/cgroup/cpuset.c (newmask && cpumask_empty(newmask)) ||
newmask 1137 kernel/cgroup/cpuset.c (!newmask && cpumask_empty(cpuset->cpus_allowed)))
newmask 1167 kernel/cgroup/cpuset.c } else if (newmask) {
newmask 1175 kernel/cgroup/cpuset.c cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
newmask 1179 kernel/cgroup/cpuset.c cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
newmask 1747 kernel/sched/sched.h const struct cpumask *newmask);
newmask 4397 kernel/signal.c SYSCALL_DEFINE1(ssetmask, int, newmask)
newmask 4402 kernel/signal.c siginitset(&newset, newmask);
newmask  349 mm/mempolicy.c static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
newmask  354 mm/mempolicy.c nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
newmask  357 mm/mempolicy.c mpol_ops[pol->mode].rebind(pol, newmask);
newmask 1764 net/openvswitch/flow_netlink.c struct nlattr *newmask = NULL;
newmask 1793 net/openvswitch/flow_netlink.c newmask = kmemdup(nla_key,
newmask 1796 net/openvswitch/flow_netlink.c if (!newmask)
newmask 1799 net/openvswitch/flow_netlink.c mask_set_nlattr(newmask, 0xff);
newmask 1808 net/openvswitch/flow_netlink.c nla_mask = newmask;
newmask 1833 net/openvswitch/flow_netlink.c kfree(newmask);
newmask 1375 net/sched/cls_flower.c struct fl_flow_mask *newmask;
newmask 1378 net/sched/cls_flower.c newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
newmask 1379 net/sched/cls_flower.c if (!newmask)
newmask 1382 net/sched/cls_flower.c fl_mask_copy(newmask, mask);
newmask 1384 net/sched/cls_flower.c if ((newmask->key.tp_range.tp_min.dst &&
newmask 1385 net/sched/cls_flower.c newmask->key.tp_range.tp_max.dst) ||
newmask 1386 net/sched/cls_flower.c (newmask->key.tp_range.tp_min.src &&
newmask 1387 net/sched/cls_flower.c newmask->key.tp_range.tp_max.src))
newmask 1388 net/sched/cls_flower.c newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
newmask 1390 net/sched/cls_flower.c err = fl_init_mask_hashtable(newmask);
newmask 1394 net/sched/cls_flower.c fl_init_dissector(&newmask->dissector, &newmask->key);
newmask 1396 net/sched/cls_flower.c INIT_LIST_HEAD_RCU(&newmask->filters);
newmask 1398 net/sched/cls_flower.c refcount_set(&newmask->refcnt, 1);
newmask 1400 net/sched/cls_flower.c &newmask->ht_node, mask_ht_params);
newmask 1405 net/sched/cls_flower.c list_add_tail_rcu(&newmask->list, &head->masks);
newmask 1408 net/sched/cls_flower.c return newmask;
newmask 1411 net/sched/cls_flower.c rhashtable_destroy(&newmask->ht);
newmask 1413 net/sched/cls_flower.c kfree(newmask);
newmask 1423 net/sched/cls_flower.c struct fl_flow_mask *newmask;
newmask 1443 net/sched/cls_flower.c newmask = fl_create_new_mask(head, mask);
newmask 1444 net/sched/cls_flower.c if (IS_ERR(newmask)) {
newmask 1445 net/sched/cls_flower.c ret = PTR_ERR(newmask);
newmask 1449 net/sched/cls_flower.c fnew->mask = newmask;