nmask 431 arch/powerpc/include/asm/processor.h extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
nmask 55 drivers/clk/clk-fractional-divider.c n = (val & fd->nmask) >> fd->nshift;
nmask 136 drivers/clk/clk-fractional-divider.c val &= ~(fd->mmask | fd->nmask);
nmask 181 drivers/clk/clk-fractional-divider.c fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
nmask 59 drivers/clk/imx/clk-composite-7ulp.c fd->nmask = PCG_PCD_MASK;
nmask 253 drivers/clk/rockchip/clk.c div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
nmask 105 drivers/ntb/test/ntb_pingpong.c u64 nmask;
nmask 124 drivers/ntb/test/ntb_pingpong.c if (link & pp->nmask) {
nmask 125 drivers/ntb/test/ntb_pingpong.c pidx = __ffs64(link & pp->nmask);
nmask 308 drivers/ntb/test/ntb_pingpong.c pp->nmask = GENMASK_ULL(pcnt - 1, pidx);
nmask 311 drivers/ntb/test/ntb_pingpong.c pp->in_db, pp->pmask, pp->nmask);
nmask 513 drivers/scsi/cxlflash/sislite.h u8 nmask;
nmask 462 drivers/scsi/cxlflash/superpipe.c if (unlikely(rhte->nmask == 0)) {
nmask 490 drivers/scsi/cxlflash/superpipe.c if (ctxi->rht_start[i].nmask == 0) {
nmask 513 drivers/scsi/cxlflash/superpipe.c rhte->nmask = 0;
nmask 1039 drivers/scsi/cxlflash/vlun.c rhte->nmask = MC_RHT_NMASK;
nmask 1241 drivers/scsi/cxlflash/vlun.c if (ctxi_dst->rht_start[i].nmask != 0) {
nmask 1292 drivers/scsi/cxlflash/vlun.c if (ctxi_src->rht_start[i].nmask == 0)
nmask 1297 drivers/scsi/cxlflash/vlun.c ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
nmask 429 drivers/scsi/pmcraid.c u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
nmask 432 drivers/scsi/pmcraid.c iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
nmask 457 drivers/scsi/pmcraid.c u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
nmask 459 drivers/scsi/pmcraid.c iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
nmask 649 include/linux/clk-provider.h u32 nmask;
nmask 792 include/linux/compat.h compat_ulong_t __user *nmask,
nmask 795 include/linux/compat.h compat_ulong_t __user *nmask,
nmask 799 include/linux/compat.h asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
nmask 364 include/linux/hugetlb.h nodemask_t *nmask);
nmask 368 include/linux/hugetlb.h int nid, nodemask_t *nmask);
nmask 609 include/linux/hugetlb.h alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
nmask 882 include/linux/syscalls.h const unsigned long __user *nmask,
nmask 886 include/linux/syscalls.h unsigned long __user *nmask,
nmask 889 include/linux/syscalls.h asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask,
nmask 895 mm/hugetlb.c nodemask_t *nmask)
nmask 907 mm/hugetlb.c for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
nmask 1456 mm/hugetlb.c gfp_t gfp_mask, int nid, nodemask_t *nmask,
nmask 1477 mm/hugetlb.c page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
nmask 1507 mm/hugetlb.c gfp_t gfp_mask, int nid, nodemask_t *nmask,
nmask 1513 mm/hugetlb.c page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
nmask 1516 mm/hugetlb.c nid, nmask, node_alloc_noretry);
nmask 1672 mm/hugetlb.c int nid, nodemask_t *nmask)
nmask 1684 mm/hugetlb.c page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
nmask 1713 mm/hugetlb.c int nid, nodemask_t *nmask)
nmask 1720 mm/hugetlb.c page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
nmask 1775 mm/hugetlb.c nodemask_t *nmask)
nmask 1783 mm/hugetlb.c page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
nmask 1791 mm/hugetlb.c return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
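The clk hits above (drivers/clk/clk-fractional-divider.c, drivers/clk/rockchip/clk.c) all follow one idiom: build the field mask once as GENMASK(width - 1, 0) << shift, then reuse it to extract the field on read and to clear it before a read-modify-write. Below is a minimal standalone C sketch of that pattern; the struct frac_div type, the helper names, and the bits-23:16/bits-7:0 register layout are invented for illustration, and GENMASK is redefined locally so the example builds in userspace.

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    /* Local stand-in for the kernel's GENMASK(h, l): bits h..l set. */
    #define GENMASK(h, l) \
        ((~0UL >> (BITS_PER_LONG - 1 - (h))) & (~0UL << (l)))

    /* Hypothetical cut-down version of struct clk_fractional_divider. */
    struct frac_div {
        unsigned long mmask, nmask;
        unsigned int mshift, nshift;
    };

    /* nmask = GENMASK(nwidth - 1, 0) << nshift, as at clk-fractional-divider.c:181. */
    static void fd_init(struct frac_div *fd, unsigned int mshift, unsigned int mwidth,
                        unsigned int nshift, unsigned int nwidth)
    {
        fd->mshift = mshift;
        fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
        fd->nshift = nshift;
        fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
    }

    /* Read path, as at line 55: mask the register, then shift the field down. */
    static unsigned long fd_get_n(const struct frac_div *fd, unsigned long val)
    {
        return (val & fd->nmask) >> fd->nshift;
    }

    /* Write path, as at line 136: clear both fields before ORing in new values. */
    static unsigned long fd_set(const struct frac_div *fd, unsigned long val,
                                unsigned long m, unsigned long n)
    {
        val &= ~(fd->mmask | fd->nmask);
        return val | (m << fd->mshift) | (n << fd->nshift);
    }

    int main(void)
    {
        struct frac_div fd;
        unsigned long reg = 0;

        fd_init(&fd, 16, 8, 0, 8); /* invented layout: m in bits 23:16, n in bits 7:0 */
        reg = fd_set(&fd, reg, 3, 125);
        printf("n = %lu\n", fd_get_n(&fd, reg)); /* prints n = 125 */
        return 0;
    }

Storing the pre-shifted mask, rather than a width and shift pair, is what lets both the extraction at line 55 and the clear at line 136 be single AND operations.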
nmask 1296 mm/memory_hotplug.c nodemask_t nmask = node_states[N_MEMORY];
nmask 1303 mm/memory_hotplug.c node_clear(nid, nmask);
nmask 1304 mm/memory_hotplug.c if (nodes_empty(nmask))
nmask 1305 mm/memory_hotplug.c node_set(nid, nmask);
nmask 1307 mm/memory_hotplug.c return new_page_nodemask(page, nid, &nmask);
nmask 412 mm/mempolicy.c nodemask_t *nmask;
nmask 428 mm/mempolicy.c return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
nmask 687 mm/mempolicy.c .nmask = nodes,
nmask 871 mm/mempolicy.c static long do_get_mempolicy(int *policy, nodemask_t *nmask,
nmask 888 mm/mempolicy.c *nmask = cpuset_current_mems_allowed;
nmask 948 mm/mempolicy.c if (nmask) {
nmask 950 mm/mempolicy.c *nmask = pol->w.user_nodemask;
nmask 953 mm/mempolicy.c get_policy_nodemask(pol, nmask);
nmask 1027 mm/mempolicy.c nodemask_t nmask;
nmask 1031 mm/mempolicy.c nodes_clear(nmask);
nmask 1032 mm/mempolicy.c node_set(source, nmask);
nmask 1040 mm/mempolicy.c queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
nmask 1219 mm/mempolicy.c nodemask_t *nmask, unsigned long flags)
nmask 1247 mm/mempolicy.c new = mpol_new(mode, mode_flags, nmask);
nmask 1263 mm/mempolicy.c nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
nmask 1276 mm/mempolicy.c err = mpol_set_nodemask(new, nmask, scratch);
nmask 1287 mm/mempolicy.c ret = queue_pages_range(mm, start, end, nmask,
nmask 1327 mm/mempolicy.c static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
nmask 1337 mm/mempolicy.c if (maxnode == 0 || !nmask)
nmask 1359 mm/mempolicy.c if (get_user(t, nmask + k))
nmask 1375 mm/mempolicy.c if (get_user(t, nmask + nlongs - 1))
nmask 1381 mm/mempolicy.c if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
nmask 1405 mm/mempolicy.c unsigned long mode, const unsigned long __user *nmask,
nmask 1420 mm/mempolicy.c err = get_nodes(&nodes, nmask, maxnode);
nmask 1427 mm/mempolicy.c unsigned long, mode, const unsigned long __user *, nmask,
nmask 1430 mm/mempolicy.c return kernel_mbind(start, len, mode, nmask, maxnode, flags);
nmask 1434 mm/mempolicy.c static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
nmask 1447 mm/mempolicy.c err = get_nodes(&nodes, nmask, maxnode);
nmask 1453 mm/mempolicy.c SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
nmask 1456 mm/mempolicy.c return kernel_set_mempolicy(mode, nmask, maxnode);
nmask 1557 mm/mempolicy.c unsigned long __user *nmask,
nmask 1568 mm/mempolicy.c if (nmask != NULL && maxnode < nr_node_ids)
nmask 1579 mm/mempolicy.c if (nmask)
nmask 1580 mm/mempolicy.c err = copy_nodes_to_user(nmask, maxnode, &nodes);
nmask 1586 mm/mempolicy.c unsigned long __user *, nmask, unsigned long, maxnode,
nmask 1589 mm/mempolicy.c return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
nmask 1595 mm/mempolicy.c compat_ulong_t __user *, nmask,
nmask 1607 mm/mempolicy.c if (nmask)
nmask 1612 mm/mempolicy.c if (!err && nmask) {
nmask 1617 mm/mempolicy.c err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
nmask 1618 mm/mempolicy.c err |= compat_put_bitmap(nmask, bm, nr_bits);
nmask 1624 mm/mempolicy.c COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
nmask 1634 mm/mempolicy.c if (nmask) {
nmask 1635 mm/mempolicy.c if (compat_get_bitmap(bm, nmask, nr_bits))
nmask 1646 mm/mempolicy.c compat_ulong_t, mode, compat_ulong_t __user *, nmask,
nmask 1656 mm/mempolicy.c if (nmask) {
nmask 1657 mm/mempolicy.c if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
nmask 2104 mm/mempolicy.c nodemask_t *nmask;
nmask 2133 mm/mempolicy.c nmask = policy_nodemask(gfp, pol);
nmask 2134 mm/mempolicy.c if (!nmask || node_isset(hpage_node, *nmask)) {
nmask 2153 mm/mempolicy.c nmask = policy_nodemask(gfp, pol);
nmask 2155 mm/mempolicy.c page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
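The mm/mempolicy.c hits above are the kernel side of the mbind()/set_mempolicy()/get_mempolicy() syscalls: get_nodes() copies the user's nmask pointer in as a flat unsigned-long bitmap, one bit per NUMA node, with maxnode giving its length in bits. A hedged userspace sketch of that calling convention follows, assuming libnuma's <numaif.h> header is installed (build with -lnuma); the mapping size and the choice of node 0 are arbitrary.

    #include <numaif.h>      /* mbind(), MPOL_BIND; assumes libnuma headers */
    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        size_t len = 16 * 4096;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* This bitmap is exactly what get_nodes() reads with get_user()/
         * copy_from_user(): bit N selects NUMA node N. Bind to node 0. */
        unsigned long nodemask = 1UL << 0;

        /* maxnode is the mask length in bits; it must cover every set bit. */
        if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0))
            perror("mbind"); /* may fail on kernels built without NUMA */

        memset(buf, 0, len); /* pages fault in on the bound node */
        munmap(buf, len);
        return 0;
    }

set_mempolicy() takes the same (nmask, maxnode) pair but applies the policy to the whole task rather than an address range, which is why kernel_mbind() and kernel_set_mempolicy() above share get_nodes().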
nmask 662 net/sched/sch_api.c unsigned int nsize, nmask, osize;
nmask 669 net/sched/sch_api.c nmask = nsize - 1;
nmask 680 net/sched/sch_api.c h = qdisc_class_hash(cl->classid, nmask);
nmask 686 net/sched/sch_api.c clhash->hashmask = nmask;
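The net/sched/sch_api.c hits are the power-of-two hash-table idiom: because the class-hash table size nsize is kept a power of two, nmask = nsize - 1 turns the bucket computation into a bitwise AND instead of a modulo. A short sketch under that assumption; the hash fold below only approximates qdisc_class_hash() and is not a verbatim copy.

    #include <stdint.h>
    #include <stdio.h>

    /* In the spirit of qdisc_class_hash(): fold the 32-bit classid down,
     * then mask it into the table. */
    static uint32_t class_hash(uint32_t id, uint32_t mask)
    {
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
    }

    int main(void)
    {
        unsigned int osize = 16;        /* current table size, a power of two */
        unsigned int nsize = osize * 2; /* grow by doubling to keep the invariant */
        unsigned int nmask = nsize - 1; /* so "h & nmask" == "h % nsize" (line 669) */

        printf("classid 0x10002 -> bucket %u of %u\n",
               class_hash(0x10002, nmask), nsize);
        return 0;
    }

On a grow, the existing classes are rehashed into the new table before clhash->hashmask is updated at line 686, so lookups never see a mask wider than the table they index.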