#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
+/*
+ * Internal flags that share the struct mempolicy flags word with
+ * "mode flags". These flags are allocated from bit 0 up, as they
+ * are never OR'ed into the mode in mempolicy API arguments.
+ */
+#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
+#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
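+/*
+ * Note: the "mode flags" mentioned above (the optional set_mempolicy()
+ * MPOL_F_* values) occupy the high bits of this word, which is why
+ * internal flags allocated upward from bit 0 cannot collide with them.
+ */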
+
#ifdef __KERNEL__
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
+#include <linux/pagemap.h>
struct mm_struct;
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
- * mpol_free() decrements the reference count to zero.
+ * mpol_put() decrements the reference count to zero.
*
- * Copying policy objects:
- * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
* to the new storage. The reference count of the new object is initialized
- * to 1, representing the caller of mpol_copy().
+ * to 1, representing the caller of mpol_dup().
*/
struct mempolicy {
atomic_t refcnt;
- unsigned short policy; /* See MPOL_* above */
+ unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
union {
short preferred_node; /* preferred */
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
{
if (pol)
- __mpol_free(pol);
+ __mpol_put(pol);
+}
+
+/*
+ * Does mempolicy pol need explicit unref after use?
+ * Currently only needed for shared policies.
+ */
+static inline int mpol_needs_cond_ref(struct mempolicy *pol)
+{
+ return (pol && (pol->flags & MPOL_F_SHARED));
}
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
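+/*
+ * mpol_cond_put() drops the reference only if the policy is marked
+ * MPOL_F_SHARED, i.e. only if the lookup that returned it took an
+ * extra reference on a shared policy.  Callers that may see either a
+ * shared policy or a task/vma default policy can pair their lookup
+ * with mpol_cond_put() unconditionally.
+ */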
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+ if (mpol_needs_cond_ref(pol))
+ __mpol_put(pol);
+}
+
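+/*
+ * mpol_cond_copy() gives the caller a stable copy of a conditionally
+ * referenced policy: for a shared policy, __mpol_cond_copy() copies it
+ * into the caller-provided storage so the extra reference can be
+ * dropped; otherwise the original policy is returned untouched.
+ */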
+extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol);
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol)
+{
+ if (!frompol)
+ return frompol;
+ return __mpol_cond_copy(tompol, frompol);
+}
+
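+/*
+ * mpol_dup() hands back a new mempolicy with its own reference count
+ * of 1 (see "Duplicating policy objects" above), e.g. for copying a
+ * task's policy, so the copy is released independently via mpol_put().
+ */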
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
if (pol)
- pol = __mpol_copy(pol);
+ pol = __mpol_dup(pol);
return pol;
}
spinlock_t lock;
};
-void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
- unsigned short flags, nodemask_t *nodes);
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
struct mempolicy *new);
int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+#ifdef CONFIG_TMPFS
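+/*
+ * Helpers to convert between a mempolicy and its string form, used by
+ * tmpfs for the "mpol=" mount option and for showing a mount's policy
+ * (hence the CONFIG_TMPFS dependency).  mpol_parse_str() returns
+ * nonzero on error.
+ */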
+extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+ int no_context);
+#endif
+
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+ return 0;
+ /*
+ * Migration allocates pages in the highest zone. If we cannot
+ * do so then migration (at least from node to node) is not
+ * possible.
+ */
+ if (vma->vm_file &&
+ gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+ < policy_zone)
+ return 0;
+ return 1;
+}
+
#else
struct mempolicy {};
return 1;
}
-static inline void mpol_free(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *p)
+{
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+}
+
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+ struct mempolicy *from)
{
+ return from;
}
static inline void mpol_get(struct mempolicy *pol)
{
}
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
return NULL;
}
return -EINVAL;
}
-static inline void mpol_shared_policy_init(struct shared_policy *info,
- unsigned short policy, unsigned short flags, nodemask_t *nodes)
+static inline void mpol_shared_policy_init(struct shared_policy *sp,
+ struct mempolicy *mpol)
{
}
static inline void check_highest_zone(int k)
{
}
+
+#ifdef CONFIG_TMPFS
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
+ int no_context)
+{
+ return 1; /* error */
+}
+
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+ int no_context)
+{
+ return 0;
+}
+#endif
+
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */