Searched refs:mpol (Results 1 – 6 of 6) sorted by relevance

/linux-4.4.14/include/linux/
mempolicy.h
129 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
150 struct mempolicy **mpol, nodemask_t **nodemask);
169 extern int mpol_parse_str(char *str, struct mempolicy **mpol);
223 struct mempolicy *mpol) in mpol_shared_policy_init() argument
259 struct mempolicy **mpol, nodemask_t **nodemask) in huge_zonelist() argument
261 *mpol = NULL; in huge_zonelist()
282 static inline int mpol_parse_str(char *str, struct mempolicy **mpol) in mpol_parse_str() argument
shmem_fs.h
34 struct mempolicy *mpol; /* default memory policy for mappings */ member
/linux-4.4.14/Documentation/filesystems/
tmpfs.txt
85 mpol=default              use the process allocation policy
87 mpol=prefer:Node          prefers to allocate memory from the given Node
88 mpol=bind:NodeList        allocates memory only from nodes in NodeList
89 mpol=interleave           prefers to allocate from each node in turn
90 mpol=interleave:NodeList  allocates from each node of NodeList in turn
91 mpol=local                prefers to allocate memory from the local node
95 largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
114 For example, mpol=bind=static:NodeList, is the equivalent of an
117 Note that trying to mount a tmpfs with an mpol option will fail if the
122 online, then it is advisable to omit the mpol option from automatic
[all …]
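
The mpol= options quoted above from tmpfs.txt are passed as tmpfs mount options. A minimal userspace sketch, assuming a made-up mount point /mnt/numa-tmp and a made-up size= value (neither appears in the results above), showing how such a tmpfs could be mounted from C with mount(2); shmem hands the mpol= string to mpol_parse_str(), which shows up in the mempolicy.h hits above:

/* Illustrative sketch only: mount a tmpfs restricted to NUMA nodes 0-3
 * (mpol=bind:0-3), matching the tmpfs.txt options quoted above.
 * The mount point and size= value are hypothetical.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* shmem parses the mpol= string with mpol_parse_str() (see mempolicy.h hits) */
	if (mount("tmpfs", "/mnt/numa-tmp", "tmpfs", 0,
		  "size=1G,mpol=bind:0-3") != 0) {
		perror("mount");	/* fails if the requested nodes are not online */
		return 1;
	}
	return 0;
}

As tmpfs.txt notes above, the mount fails if the NodeList names nodes that are not online, so the mpol option is best left out of mounts done automatically at boot.
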
/linux-4.4.14/mm/
shmem.c
871 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) in shmem_show_mpol() argument
875 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
878 mpol_to_str(buffer, sizeof(buffer), mpol); in shmem_show_mpol()
885 struct mempolicy *mpol = NULL; in shmem_get_sbmpol() local
886 if (sbinfo->mpol) { in shmem_get_sbmpol()
888 mpol = sbinfo->mpol; in shmem_get_sbmpol()
889 mpol_get(mpol); in shmem_get_sbmpol()
892 return mpol; in shmem_get_sbmpol()
939 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) in shmem_show_mpol() argument
1365 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) in shmem_set_policy() argument
[all …]
mempolicy.c
1807 gfp_t gfp_flags, struct mempolicy **mpol, in huge_zonelist() argument
1812 *mpol = get_vma_policy(vma, addr); in huge_zonelist()
1815 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { in huge_zonelist()
1816 zl = node_zonelist(interleave_nid(*mpol, vma, addr, in huge_zonelist()
1819 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); in huge_zonelist()
1820 if ((*mpol)->mode == MPOL_BIND) in huge_zonelist()
1821 *nodemask = &(*mpol)->v.nodes; in huge_zonelist()
2429 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) in mpol_shared_policy_init() argument
2436 if (mpol) { in mpol_shared_policy_init()
2444 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); in mpol_shared_policy_init()
[all …]
hugetlb.c
881 struct mempolicy *mpol; in dequeue_huge_page_vma() local
904 htlb_alloc_mask(h), &mpol, &nodemask); in dequeue_huge_page_vma()
923 mpol_cond_put(mpol); in dequeue_huge_page_vma()
1493 struct mempolicy *mpol; in __hugetlb_alloc_buddy_huge_page() local
1498 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask); in __hugetlb_alloc_buddy_huge_page()
1499 mpol_cond_put(mpol); in __hugetlb_alloc_buddy_huge_page()
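
For context on how the hugetlb.c and mempolicy.c paths above get exercised, here is a hedged userspace sketch (not code from the tree above): mbind(2) attaches an MPOL_INTERLEAVE policy to a MAP_HUGETLB mapping, and it is this per-VMA policy that get_vma_policy()/huge_zonelist() look up in the hits shown. The 2 MB huge page size and the node mask are assumptions; the program needs libnuma's numaif.h (link with -lnuma) and reserved huge pages:

/* Hypothetical sketch, not from the kernel sources above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <numaif.h>		/* mbind(), MPOL_INTERLEAVE; provided by libnuma */

int main(void)
{
	size_t len = 2UL << 20;		/* one 2 MB huge page (size is an assumption) */
	unsigned long nodemask = 0x3;	/* interleave over nodes 0 and 1 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");		/* e.g. no huge pages reserved */
		return 1;
	}
	/* Sets the VMA policy whose MPOL_INTERLEAVE mode huge_zonelist() tests */
	if (mbind(p, len, MPOL_INTERLEAVE, &nodemask, 8, 0) != 0) {
		perror("mbind");
		return 1;
	}
	memset(p, 0, len);		/* faulting the page in reaches dequeue_huge_page_vma() */
	return 0;
}
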