#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in a single 'int'
 * actual parameter.  The MPOL_MODE_FLAGS macro determines the legal set of
 * optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

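/*
 * Illustrative sketch (mirroring the validation done in mm/mempolicy.c):
 * because MPOL_MAX is kept as the last member, user-supplied modes can be
 * range-checked against it:
 *
 *	if (mode >= MPOL_MAX)
 *		return -EINVAL;
 */
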
/* Flags for set_mempolicy */
/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().  No optional mode flags are defined
 * yet, so the union is currently empty.
 */
#define MPOL_MODE_FLAGS	(0)

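/*
 * Illustrative sketch (mirroring how mm/mempolicy.c separates the two):
 * the mode and its optional flags travel together in one 'int', so the
 * kernel splits them with MPOL_MODE_FLAGS; with the mask empty, 'flags'
 * is always 0 for now:
 *
 *	unsigned short flags = mode & MPOL_MODE_FLAGS;
 *	mode &= ~MPOL_MODE_FLAGS;
 */
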
/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

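/*
 * Illustrative userspace sketch (not kernel code) of the flags above:
 *
 *	int mode;
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR);
 *
 * looks up the policy of the vma containing 'addr', while
 *
 *	get_mempolicy(NULL, mask, maxnode, NULL, MPOL_F_MEMS_ALLOWED);
 *
 * fills 'mask' with the nodes the calling task is allowed to use.
 */
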
/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

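/*
 * Illustrative userspace sketch: bind a mapping to the nodes in 'mask' and
 * migrate the pages this process already faulted in, failing if existing
 * pages cannot conform:
 *
 *	mbind(start, len, MPOL_BIND, mask, maxnode,
 *	      MPOL_MF_STRICT | MPOL_MF_MOVE);
 */
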
#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;
struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_free() decrements the reference count to zero.
 *
 * Copying policy objects:
 * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_copy().
 */

struct mempolicy {
	atomic_t refcnt;
	unsigned short policy;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node;	/* preferred */
		nodemask_t	 nodes;			/* interleave/bind */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};

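/*
 * Illustrative sketch (an assumption, modeled on how mm/mempolicy.c
 * consumes these fields): which member of 'v' is meaningful depends on
 * 'policy'; MPOL_DEFAULT uses neither:
 *
 *	switch (pol->policy) {
 *	case MPOL_PREFERRED:
 *		nid = pol->v.preferred_node;
 *		break;
 *	case MPOL_BIND:
 *	case MPOL_INTERLEAVE:
 *		nmask = pol->v.nodes;
 *		break;
 *	}
 */
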
/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

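/*
 * Illustrative lifecycle sketch (a hypothetical caller duplicating a VMA):
 * mpol_copy() hands back a reference owned by the caller, which is dropped
 * again with mpol_free() at teardown:
 *
 *	struct mempolicy *new = mpol_copy(vma_policy(old_vma));
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	vma_set_policy(new_vma, new);
 */
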
static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}

#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
				unsigned short flags, nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

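/*
 * Illustrative sketch (modeled on the tmpfs caller): because the tree is
 * indexed in pages, a faulting address is converted to a page index before
 * the lookup ('info' stands for some object embedding a shared_policy):
 *
 *	pgoff_t idx = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 */
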
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

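/*
 * Illustrative sketch (an assumption about the return convention): move
 * the mm's pages living on the nodes in 'from' over to the nodes in 'to';
 * the result is the number of pages that could not be moved:
 *
 *	nr_failed = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */
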
#else /* CONFIG_NUMA */

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
		unsigned short policy, unsigned short flags, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif