* Processor and Memory placement constraints for sets of tasks.
*
* Copyright (C) 2003 BULL SA.
- * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ * Copyright (C) 2004-2007 Silicon Graphics, Inc.
* Copyright (C) 2006 Google, Inc
*
* Portions derived from Patrick Mochel's sysfs code.
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/cgroup.h>
/*
* Tracks how many cpusets are currently defined in system.
*/
int number_of_cpusets __read_mostly;
-/* Retrieve the cpuset from a cgroup */
+/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;
int mems_generation;
struct fmeter fmeter; /* memory_pressure filter */
+
+ /* partition number for rebuild_sched_domains() */
+ int pn;
+
+	/* used for walking a cpuset hierarchy */
+ struct list_head stack_list;
};
/* Retrieve the cpuset for a cgroup */
return container_of(task_subsys_state(task, cpuset_subsys_id),
struct cpuset, css);
}
-
+struct cpuset_hotplug_scanner {
+ struct cgroup_scanner scan;
+ struct cgroup *to;
+};
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
CS_MEM_EXCLUSIVE,
CS_MEMORY_MIGRATE,
+ CS_SCHED_LOAD_BALANCE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
} cpuset_flagbits_t;
return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}
+static inline int is_sched_load_balance(const struct cpuset *cs)
+{
+ return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+}
+
static inline int is_memory_migrate(const struct cpuset *cs)
{
return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
* number, and avoid having to lock and reload mems_allowed unless
* the cpuset they're using changes generation.
*
- * A single, global generation is needed because attach_task() could
+ * A single, global generation is needed because cpuset_attach_task() could
* reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
*
* Generations are needed for mems_allowed because one task cannot
- * modify anothers memory placement. So we must enable every task,
+ * modify another's memory placement. So we must enable every task,
* on every visit to __alloc_pages(), to efficiently check whether
* its current->cpuset->mems_allowed has changed, requiring an update
* of its current->mems_allowed.
*
- * Since cpuset_mems_generation is guarded by manage_mutex,
+ * Since writes to cpuset_mems_generation are guarded by the cgroup lock,
* there is no need to mark it atomic.
*/
static int cpuset_mems_generation;
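/*
 * A minimal sketch (illustration only, not part of this patch) of the
 * per-allocation check described above; the real work is done by
 * cpuset_update_task_memory_state().  The fast path is one integer
 * compare, and locks are taken only when the generations differ:
 *
 *	rcu_read_lock();
 *	my_gen = task_cs(current)->mems_generation;
 *	rcu_read_unlock();
 *	if (current->cpuset_mems_generation != my_gen) {
 *		mutex_lock(&callback_mutex);
 *		... refresh current->mems_allowed from the cpuset and
 *		    cache the new generation ...
 *		mutex_unlock(&callback_mutex);
 *	}
 */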
};
/*
- * We have two global cpuset mutexes below. They can nest.
- * It is ok to first take manage_mutex, then nest callback_mutex. We also
- * require taking task_lock() when dereferencing a tasks cpuset pointer.
- * See "The task_lock() exception", at the end of this comment.
+ * There are two global mutexes guarding cpuset structures. The first
+ * is the main control groups cgroup_mutex, accessed via
+ * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific
+ * callback_mutex, below. They can nest. It is ok to first take
+ * cgroup_mutex, then nest callback_mutex. We also require taking
+ * task_lock() when dereferencing a task's cpuset pointer. See "The
+ * task_lock() exception", at the end of this comment.
*
* A task must hold both mutexes to modify cpusets. If a task
- * holds manage_mutex, then it blocks others wanting that mutex,
+ * holds cgroup_mutex, then it blocks others wanting that mutex,
* ensuring that it is the only task able to also acquire callback_mutex
* and be able to modify cpusets. It can perform various checks on
* the cpuset structure first, knowing nothing will change. It can
- * also allocate memory while just holding manage_mutex. While it is
+ * also allocate memory while just holding cgroup_mutex. While it is
* performing these checks, various callback routines can briefly
* acquire callback_mutex to query cpusets. Once it is ready to make
* the changes, it takes callback_mutex, blocking everyone else.
* The task_struct fields mems_allowed and mems_generation may only
* be accessed in the context of that task, so require no locks.
*
- * Any task can increment and decrement the count field without lock.
- * So in general, code holding manage_mutex or callback_mutex can't rely
- * on the count field not changing. However, if the count goes to
- * zero, then only attach_task(), which holds both mutexes, can
- * increment it again. Because a count of zero means that no tasks
- * are currently attached, therefore there is no way a task attached
- * to that cpuset can fork (the other way to increment the count).
- * So code holding manage_mutex or callback_mutex can safely assume that
- * if the count is zero, it will stay zero. Similarly, if a task
- * holds manage_mutex or callback_mutex on a cpuset with zero count, it
- * knows that the cpuset won't be removed, as cpuset_rmdir() needs
- * both of those mutexes.
- *
 * The cpuset_common_file_write() handler for operations that modify
- * the cpuset hierarchy holds manage_mutex across the entire operation,
+ * the cpuset hierarchy holds cgroup_mutex across the entire operation,
* single threading all such cpuset modifications across the system.
*
* The cpuset_common_file_read() handlers only hold callback_mutex across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
- * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
- * (usually) take either mutex. These are the two most performance
- * critical pieces of code here. The exception occurs on cpuset_exit(),
- * when a task in a notify_on_release cpuset exits. Then manage_mutex
- * is taken, and if the cpuset count is zero, a usermode call made
- * to /sbin/cpuset_release_agent with the name of the cpuset (path
- * relative to the root of cpuset file system) as the argument.
- *
- * A cpuset can only be deleted if both its 'count' of using tasks
- * is zero, and its list of 'children' cpusets is empty. Since all
- * tasks in the system use _some_ cpuset, and since there is always at
- * least one task in the system (init), therefore, top_cpuset
- * always has either children cpusets and/or using tasks. So we don't
- * need a special hack to ensure that top_cpuset cannot be deleted.
- *
- * The above "Tale of Two Semaphores" would be complete, but for:
- *
- * The task_lock() exception
- *
- * The need for this exception arises from the action of attach_task(),
- * which overwrites one tasks cpuset pointer with another. It does
- * so using both mutexes, however there are several performance
- * critical places that need to reference task->cpuset without the
- * expense of grabbing a system global mutex. Therefore except as
- * noted below, when dereferencing or, as in attach_task(), modifying
- * a tasks cpuset pointer we use task_lock(), which acts on a spinlock
- * (task->alloc_lock) already in the task_struct routinely used for
- * such matters.
- *
- * P.S. One more locking exception. RCU is used to guard the
- * update of a tasks cpuset pointer by attach_task() and the
- * access of task->cpuset->mems_generation via that pointer in
- * the routine cpuset_update_task_memory_state().
+ * Accessing a task's cpuset should be done in accordance with the
+ * guidelines for accessing subsystem state in kernel/cgroup.c
*/
static DEFINE_MUTEX(callback_mutex);
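/*
 * Illustrative locking order (a sketch, not part of this patch): a
 * writer modifying a cpuset takes cgroup_mutex first and nests
 * callback_mutex only around the actual update:
 *
 *	cgroup_lock();
 *	... validate the change, allocate memory, scan tasks ...
 *	mutex_lock(&callback_mutex);
 *	... update cs->cpus_allowed, cs->mems_allowed or cs->flags ...
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();
 */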
* Do not call this routine if in_interrupt().
*
* Call without callback_mutex or task_lock() held. May be
- * called with or without manage_mutex held. Thanks in part to
- * 'the_top_cpuset_hack', the tasks cpuset pointer will never
+ * called with or without cgroup_mutex held. Thanks in part to
+ * 'the_top_cpuset_hack', the task's cpuset pointer will never
* be NULL. This routine also might acquire callback_mutex and
* current->mm->mmap_sem during call.
*
* Reading current->cpuset->mems_generation doesn't need task_lock
* to guard the current->cpuset derefence, because it is guarded
- * from concurrent freeing of current->cpuset by attach_task(),
- * using RCU.
+ * from concurrent freeing of current->cpuset using RCU.
*
* The rcu_dereference() is technically probably not needed,
* as I don't actually mind if I see a new cpuset pointer but
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding manage_mutex.
+ * are only set if the other's are set. Call holding cgroup_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
- * manage_mutex held.
+ * cgroup_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
if (!is_cpuset_subset(trial, par))
return -EACCES;
- /* If either I or some sibling (!= me) is exclusive, we can't overlap */
+ /*
+ * If either I or some sibling (!= me) is exclusive, we can't
+ * overlap
+ */
list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
c = cgroup_cs(cont);
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
return -EINVAL;
}
+ /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
+ if (cgroup_task_count(cur->css.cgroup)) {
+ if (cpus_empty(trial->cpus_allowed) ||
+ nodes_empty(trial->mems_allowed)) {
+ return -ENOSPC;
+ }
+ }
+
return 0;
}
/*
- * Call with manage_mutex held. May take callback_mutex during call.
+ * Helper routine for rebuild_sched_domains().
+ * Do cpusets a, b have overlapping cpus_allowed masks?
+ */
+
+static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
+{
+ return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
+}
+
+/*
+ * rebuild_sched_domains()
+ *
+ * If the flag 'sched_load_balance' of any cpuset with non-empty
+ * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
+ * which has that flag enabled, or if any cpuset with a non-empty
+ * 'cpus' is removed, then call this routine to rebuild the
+ * scheduler's dynamic sched domains.
+ *
+ * This routine builds a partial partition of the system's CPUs
+ * (the set of non-overlapping cpumask_t's in the array 'doms'
+ * below), and passes that partial partition to the kernel/sched.c
+ * partition_sched_domains() routine, which will rebuild the
+ * scheduler's load balancing domains (sched domains) as specified
+ * by that partial partition. A 'partial partition' is a set of
+ * non-overlapping subsets whose union is a subset of the system's CPUs.
+ *
+ * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * for a background explanation of this.
+ *
+ * Does not return errors, on the theory that the callers of this
+ * routine would rather not worry about failures to rebuild sched
+ * domains when operating in the severe memory shortage situations
+ * that could cause allocation failures below.
+ *
+ * Call with cgroup_mutex held. May take callback_mutex during
+ * call due to the kfifo_alloc() and kmalloc() calls. May nest
+ * a call to the get_online_cpus()/put_online_cpus() pair.
+ * Must not be called holding callback_mutex, because we must not
+ * call get_online_cpus() while holding callback_mutex. Elsewhere
+ * the kernel nests callback_mutex inside get_online_cpus() calls.
+ * So the reverse nesting would risk an ABBA deadlock.
+ *
+ * The three key local variables below are:
+ * q - a kfifo queue of cpuset pointers, used to implement a
+ * top-down scan of all cpusets. This scan loads a pointer
+ * to each cpuset marked is_sched_load_balance into the
+ *		to each cpuset marked is_sched_load_balance into the
+ *		array 'csa'. For our purposes, rebuilding the scheduler's
+ * sched domains, we can ignore !is_sched_load_balance cpusets.
+ * csa - (for CpuSet Array) Array of pointers to all the cpusets
+ * that need to be load balanced, for convenient iterative
+ * access by the subsequent code that finds the best partition,
+ *		i.e. the set of domains (subsets) of CPUs such that the
+ * cpus_allowed of every cpuset marked is_sched_load_balance
+ * is a subset of one of these domains, while there are as
+ * many such domains as possible, each as small as possible.
+ * doms - Conversion of 'csa' to an array of cpumasks, for passing to
+ * the kernel/sched.c routine partition_sched_domains() in a
+ * convenient format, that can be easily compared to the prior
+ * value to determine what partition elements (sched domains)
+ *		were changed (added or removed).
+ *
+ * Finding the best partition (set of domains):
+ * The triple nested loops below over i, j, k scan over the
+ * load balanced cpusets (using the array of cpuset pointers in
+ * csa[]) looking for pairs of cpusets that have overlapping
+ * cpus_allowed, but which don't have the same 'pn' partition
+ * number, and merges them into the same partition number. It keeps
+ * looping on the 'restart' label until it can no longer find
+ * any such pairs.
+ *
+ * The union of the cpus_allowed masks from the set of
+ * all cpusets having the same 'pn' value then form the one
+ * element of the partition (one sched domain) to be passed to
+ * partition_sched_domains().
*/
+static void rebuild_sched_domains(void)
+{
+ struct kfifo *q; /* queue of cpusets to be scanned */
+ struct cpuset *cp; /* scans q */
+ struct cpuset **csa; /* array of all cpuset ptrs */
+ int csn; /* how many cpuset ptrs in csa so far */
+ int i, j, k; /* indices for partition finding loops */
+ cpumask_t *doms; /* resulting partition; i.e. sched domains */
+ int ndoms; /* number of sched domains in result */
+ int nslot; /* next empty doms[] cpumask_t slot */
+
+ q = NULL;
+ csa = NULL;
+ doms = NULL;
+
+ /* Special case for the 99% of systems with one, full, sched domain */
+ if (is_sched_load_balance(&top_cpuset)) {
+ ndoms = 1;
+ doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!doms)
+ goto rebuild;
+ *doms = top_cpuset.cpus_allowed;
+ goto rebuild;
+ }
+
+ q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
+ if (IS_ERR(q))
+ goto done;
+ csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
+ if (!csa)
+ goto done;
+ csn = 0;
+
+ cp = &top_cpuset;
+ __kfifo_put(q, (void *)&cp, sizeof(cp));
+ while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+ struct cgroup *cont;
+ struct cpuset *child; /* scans child cpusets of cp */
+ if (is_sched_load_balance(cp))
+ csa[csn++] = cp;
+ list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+ child = cgroup_cs(cont);
+ __kfifo_put(q, (void *)&child, sizeof(cp));
+ }
+ }
+
+ for (i = 0; i < csn; i++)
+ csa[i]->pn = i;
+ ndoms = csn;
+
+restart:
+ /* Find the best partition (set of sched domains) */
+ for (i = 0; i < csn; i++) {
+ struct cpuset *a = csa[i];
+ int apn = a->pn;
+
+ for (j = 0; j < csn; j++) {
+ struct cpuset *b = csa[j];
+ int bpn = b->pn;
+
+ if (apn != bpn && cpusets_overlap(a, b)) {
+ for (k = 0; k < csn; k++) {
+ struct cpuset *c = csa[k];
+
+ if (c->pn == bpn)
+ c->pn = apn;
+ }
+ ndoms--; /* one less element */
+ goto restart;
+ }
+ }
+ }
+
+ /* Convert <csn, csa> to <ndoms, doms> */
+ doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
+ if (!doms)
+ goto rebuild;
+
+ for (nslot = 0, i = 0; i < csn; i++) {
+ struct cpuset *a = csa[i];
+ int apn = a->pn;
+
+ if (apn >= 0) {
+ cpumask_t *dp = doms + nslot;
+
+ if (nslot == ndoms) {
+ static int warnings = 10;
+ if (warnings) {
+ printk(KERN_WARNING
+ "rebuild_sched_domains confused:"
+ " nslot %d, ndoms %d, csn %d, i %d,"
+ " apn %d\n",
+ nslot, ndoms, csn, i, apn);
+ warnings--;
+ }
+ continue;
+ }
+
+ cpus_clear(*dp);
+ for (j = i; j < csn; j++) {
+ struct cpuset *b = csa[j];
+
+ if (apn == b->pn) {
+ cpus_or(*dp, *dp, b->cpus_allowed);
+ b->pn = -1;
+ }
+ }
+ nslot++;
+ }
+ }
+ BUG_ON(nslot != ndoms);
+
+rebuild:
+ /* Have scheduler rebuild sched domains */
+ get_online_cpus();
+ partition_sched_domains(ndoms, doms);
+ put_online_cpus();
+
+done:
+ if (q && !IS_ERR(q))
+ kfifo_free(q);
+ kfree(csa);
+ /* Don't kfree(doms) -- partition_sched_domains() does that. */
+}
+
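+/*
+ * Worked example (illustration only, not from the original source):
+ * with three load-balanced cpusets whose cpus_allowed are A = 0-3,
+ * B = 2-5 and C = 8-11, A and B overlap, so the loops above merge
+ * their 'pn' values and ndoms drops to 2.  The partition handed to
+ * partition_sched_domains() is then doms[0] = 0-5 and doms[1] = 8-11.
+ */
+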
+static inline int started_after_time(struct task_struct *t1,
+ struct timespec *time,
+ struct task_struct *t2)
+{
+ int start_diff = timespec_compare(&t1->start_time, time);
+ if (start_diff > 0) {
+ return 1;
+ } else if (start_diff < 0) {
+ return 0;
+ } else {
+ /*
+ * Arbitrarily, if two processes started at the same
+ * time, we'll say that the lower pointer value
+ * started first. Note that t2 may have exited by now
+ * so this may not be a valid pointer any longer, but
+ * that's fine - it still serves to distinguish
+ * between two tasks started (effectively)
+ * simultaneously.
+ */
+ return t1 > t2;
+ }
+}
+
+static inline int started_after(void *p1, void *p2)
+{
+ struct task_struct *t1 = p1;
+ struct task_struct *t2 = p2;
+ return started_after_time(t1, &t2->start_time, t2);
+}
+
+/**
+ * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
+ * @tsk: task to test
+ * @scan: struct cgroup_scanner containing the cgroup of the task
+ *
+ * Call with cgroup_mutex held. May take callback_mutex during call.
+ * Called for each task in a cgroup by cgroup_scan_tasks().
+ * Return nonzero if this task's cpus_allowed mask should be changed (in other
+ * words, if its mask is not equal to its cpuset's mask).
+ */
+int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+{
+ return !cpus_equal(tsk->cpus_allowed,
+ (cgroup_cs(scan->cg))->cpus_allowed);
+}
+
+/**
+ * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
+ * @tsk: task to test
+ * @scan: struct cgroup_scanner containing the cgroup of the task
+ *
+ * Called by cgroup_scan_tasks() for each task in a cgroup whose
+ * cpus_allowed mask needs to be changed.
+ *
+ * We don't need to re-check for the cgroup/cpuset membership, since we're
+ * holding cgroup_lock() at this point.
+ */
+void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+{
+ set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
+}
+
+/**
+ * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
+ * @cs: the cpuset to consider
+ * @buf: buffer of cpu numbers written to this cpuset
+ */
static int update_cpumask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
+ struct cgroup_scanner scan;
+ struct ptr_heap heap;
int retval;
+ int is_load_balanced;
/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
if (cs == &top_cpuset)
trialcs = *cs;
/*
- * We allow a cpuset's cpus_allowed to be empty; if it has attached
- * tasks, we'll catch it later when we validate the change and return
- * -ENOSPC.
+ * An empty cpus_allowed is ok only if the cpuset has no tasks.
+ * Since cpulist_parse() fails on an empty mask, we special case
+ * that parsing. The validate_change() call ensures that cpusets
+ * with tasks have cpus.
*/
- if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
+ buf = strstrip(buf);
+ if (!*buf) {
cpus_clear(trialcs.cpus_allowed);
} else {
retval = cpulist_parse(buf, trialcs.cpus_allowed);
return retval;
}
cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
- /* cpus_allowed cannot be empty for a cpuset with attached tasks. */
- if (cgroup_task_count(cs->css.cgroup) &&
- cpus_empty(trialcs.cpus_allowed))
- return -ENOSPC;
retval = validate_change(cs, &trialcs);
if (retval < 0)
return retval;
+
+ /* Nothing to do if the cpus didn't change */
+ if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
+ return 0;
+
+ retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
+ if (retval)
+ return retval;
+
+ is_load_balanced = is_sched_load_balance(&trialcs);
+
mutex_lock(&callback_mutex);
cs->cpus_allowed = trialcs.cpus_allowed;
mutex_unlock(&callback_mutex);
+
+ /*
+ * Scan tasks in the cpuset, and update the cpumasks of any
+ * that need an update.
+ */
+ scan.cg = cs->css.cgroup;
+ scan.test_task = cpuset_test_cpumask;
+ scan.process_task = cpuset_change_cpumask;
+ scan.heap = &heap;
+ cgroup_scan_tasks(&scan);
+ heap_free(&heap);
+
+ if (is_load_balanced)
+ rebuild_sched_domains();
return 0;
}
 * Temporarily set the task's mems_allowed to target nodes of migration,
* so that the migration code can allocate pages on these nodes.
*
- * Call holding manage_mutex, so our current->cpuset won't change
- * during this call, as manage_mutex holds off any attach_task()
+ * Call holding cgroup_mutex, so current's cpuset won't change
+ * during this call, as cgroup_mutex holds off any cpuset_attach()
* calls. Therefore we don't need to take task_lock around the
* call to guarantee_online_mems(), as we know no one is changing
- * our tasks cpuset.
+ * our task's cpuset.
*
 * Hold callback_mutex around the two modifications of our task's
* mems_allowed to synchronize with cpuset_mems_allowed().
 * the cpuset is marked 'memory_migrate', migrate the task's
* pages to the new memory.
*
- * Call with manage_mutex held. May take callback_mutex during call.
+ * Call with cgroup_mutex held. May take callback_mutex during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vmas and rebind
 * their mempolicies to the cpuset's new mems_allowed.
trialcs = *cs;
/*
- * We allow a cpuset's mems_allowed to be empty; if it has attached
- * tasks, we'll catch it later when we validate the change and return
- * -ENOSPC.
+ * An empty mems_allowed is ok only if there are no tasks in the cpuset.
+ * Since nodelist_parse() fails on an empty mask, we special case
+ * that parsing. The validate_change() call ensures that cpusets
+ * with tasks have memory.
*/
- if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
+ buf = strstrip(buf);
+ if (!*buf) {
nodes_clear(trialcs.mems_allowed);
} else {
retval = nodelist_parse(buf, trialcs.mems_allowed);
if (retval < 0)
goto done;
- if (!nodes_intersects(trialcs.mems_allowed,
- node_states[N_HIGH_MEMORY])) {
- /*
- * error if only memoryless nodes specified.
- */
- retval = -ENOSPC;
- goto done;
- }
}
- /*
- * Exclude memoryless nodes. We know that trialcs.mems_allowed
- * contains at least one node with memory.
- */
nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
node_states[N_HIGH_MEMORY]);
oldmem = cs->mems_allowed;
retval = 0; /* Too easy - nothing to do */
goto done;
}
- /* mems_allowed cannot be empty for a cpuset with attached tasks. */
- if (cgroup_task_count(cs->css.cgroup) &&
- nodes_empty(trialcs.mems_allowed)) {
- retval = -ENOSPC;
- goto done;
- }
retval = validate_change(cs, &trialcs);
if (retval < 0)
goto done;
* tasklist_lock. Forks can happen again now - the mpol_copy()
* cpuset_being_rebound check will catch such forks, and rebind
* their vma mempolicies too. Because we still hold the global
- * cpuset manage_mutex, we know that no other rebind effort will
+ * cgroup_mutex, we know that no other rebind effort will
* be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
mmput(mm);
}
- /* We're done rebinding vma's to this cpusets new mems_allowed. */
+ /* We're done rebinding vmas to this cpuset's new mems_allowed. */
kfree(mmarray);
cpuset_being_rebound = NULL;
retval = 0;
}
/*
- * Call with manage_mutex held.
+ * Call with cgroup_mutex held.
*/
static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
+ * CS_SCHED_LOAD_BALANCE,
* CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
* CS_SPREAD_PAGE, CS_SPREAD_SLAB)
* cs: the cpuset to update
* buf: the buffer where we read the 0 or 1
*
- * Call with manage_mutex held.
+ * Call with cgroup_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
int turning_on;
struct cpuset trialcs;
int err;
+ int cpus_nonempty, balance_flag_changed;
turning_on = (simple_strtoul(buf, NULL, 10) != 0);
err = validate_change(cs, &trialcs);
if (err < 0)
return err;
+
+ cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
+ balance_flag_changed = (is_sched_load_balance(cs) !=
+ is_sched_load_balance(&trialcs));
+
mutex_lock(&callback_mutex);
cs->flags = trialcs.flags;
mutex_unlock(&callback_mutex);
+ if (cpus_nonempty && balance_flag_changed)
+ rebuild_sched_domains();
+
return 0;
}
return val;
}
+/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
static int cpuset_can_attach(struct cgroup_subsys *ss,
struct cgroup *cont, struct task_struct *tsk)
{
FILE_MEMLIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
+ FILE_SCHED_LOAD_BALANCE,
FILE_MEMORY_PRESSURE_ENABLED,
FILE_MEMORY_PRESSURE,
FILE_SPREAD_PAGE,
int retval = 0;
/* Crude upper limit on largest legitimate cpulist user might write. */
- if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
+ if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
return -E2BIG;
/* +1 for nul-terminator */
case FILE_MEM_EXCLUSIVE:
retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
break;
+ case FILE_SCHED_LOAD_BALANCE:
+ retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
+ break;
case FILE_MEMORY_MIGRATE:
retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
break;
case FILE_MEM_EXCLUSIVE:
*s++ = is_mem_exclusive(cs) ? '1' : '0';
break;
+ case FILE_SCHED_LOAD_BALANCE:
+ *s++ = is_sched_load_balance(cs) ? '1' : '0';
+ break;
case FILE_MEMORY_MIGRATE:
*s++ = is_memory_migrate(cs) ? '1' : '0';
break;
.private = FILE_MEM_EXCLUSIVE,
};
+static struct cftype cft_sched_load_balance = {
+ .name = "sched_load_balance",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
+ .private = FILE_SCHED_LOAD_BALANCE,
+};
+
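+/*
+ * Usage sketch (illustration only): with the cpuset filesystem mounted
+ * on /dev/cpuset, per-cpuset load balancing can be disabled with
+ * something like:
+ *
+ *	echo 0 > /dev/cpuset/mycpuset/sched_load_balance
+ *
+ * which reaches update_flag(CS_SCHED_LOAD_BALANCE, ...) through
+ * cpuset_common_file_write() above.
+ */
+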
static struct cftype cft_memory_migrate = {
.name = "memory_migrate",
.read = cpuset_common_file_read,
return err;
if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
return err;
+ if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
+ return err;
if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
* If this becomes a problem for some users who wish to
* allow that scenario, then cpuset_post_clone() could be
* changed to grant parent->cpus_allowed-sibling_cpus_exclusive
- * (and likewise for mems) to the new cgroup.
+ * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
+ * held.
*/
static void cpuset_post_clone(struct cgroup_subsys *ss,
struct cgroup *cgroup)
/*
* cpuset_create - create a cpuset
- * parent: cpuset that will be parent of the new cpuset.
- * name: name of the new cpuset. Will be strcpy'ed.
- * mode: mode to set on new inode
- *
- * Must be called with the mutex on the parent inode held
+ * ss: cpuset cgroup subsystem
+ * cont: control group that the new cpuset will be part of
*/
static struct cgroup_subsys_state *cpuset_create(
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
+ set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cs->cpus_allowed = CPU_MASK_NONE;
cs->mems_allowed = NODE_MASK_NONE;
cs->mems_generation = cpuset_mems_generation++;
return &cs->css ;
}
+/*
+ * Locking note on the strange update_flag() call below:
+ *
+ * If the cpuset being removed has its flag 'sched_load_balance'
+ * enabled, then simulate turning sched_load_balance off, which
+ * will call rebuild_sched_domains(). The get_online_cpus()
+ * call in rebuild_sched_domains() must not be made while holding
+ * callback_mutex. Elsewhere the kernel nests callback_mutex inside
+ * get_online_cpus() calls. So the reverse nesting would risk an
+ * ABBA deadlock.
+ */
+
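+/*
+ * Illustration of the ABBA risk described above (sketch only, not part
+ * of this patch): elsewhere callback_mutex nests inside
+ * get_online_cpus(), so taking them in the reverse order here could
+ * deadlock:
+ *
+ *	elsewhere:			wrong order here:
+ *	get_online_cpus();		mutex_lock(&callback_mutex);
+ *	mutex_lock(&callback_mutex);	get_online_cpus();	<- deadlock
+ */
+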
static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct cpuset *cs = cgroup_cs(cont);
cpuset_update_task_memory_state();
+
+ if (is_sched_load_balance(cs))
+ update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");
+
number_of_cpusets--;
kfree(cs);
}
fmeter_init(&top_cpuset.fmeter);
top_cpuset.mems_generation = cpuset_mems_generation++;
+ set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
err = register_filesystem(&cpuset_fs_type);
if (err < 0)
return 0;
}
+/**
+ * cpuset_do_move_task - move a given task to another cpuset
+ * @tsk: pointer to task_struct the task to move
+ * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
+ *
+ * Called by cgroup_scan_tasks() for each task in a cgroup.
+ * Return nonzero to stop the walk through the tasks.
+ */
+void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
+{
+ struct cpuset_hotplug_scanner *chsp;
+
+ chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
+ cgroup_attach_task(chsp->to, tsk);
+}
+
+/**
+ * move_member_tasks_to_cpuset - move tasks from one cpuset to another
+ * @from: cpuset in which the tasks currently reside
+ * @to: cpuset to which the tasks will be moved
+ *
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
+ *
+ * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * calling callback functions for each.
+ */
+static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
+{
+ struct cpuset_hotplug_scanner scan;
+
+ scan.scan.cg = from->css.cgroup;
+ scan.scan.test_task = NULL; /* select all tasks in cgroup */
+ scan.scan.process_task = cpuset_do_move_task;
+ scan.scan.heap = NULL;
+ scan.to = to->css.cgroup;
+
+ if (cgroup_scan_tasks((struct cgroup_scanner *)&scan))
+ printk(KERN_ERR "move_member_tasks_to_cpuset: "
+ "cgroup_scan_tasks failed\n");
+}
+
/*
* If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
* removing that CPU or node from all cpusets. If this removes the
- * last CPU or node from a cpuset, then the guarantee_online_cpus()
- * or guarantee_online_mems() code will use that emptied cpusets
- * parent online CPUs or nodes. Cpusets that were already empty of
- * CPUs or nodes are left empty.
+ * last CPU or node from a cpuset, then move the tasks in the empty
+ * cpuset to its next-highest non-empty parent.
*
- * This routine is intentionally inefficient in a couple of regards.
- * It will check all cpusets in a subtree even if the top cpuset of
- * the subtree has no offline CPUs or nodes. It checks both CPUs and
- * nodes, even though the caller could have been coded to know that
- * only one of CPUs or nodes needed to be checked on a given call.
- * This was done to minimize text size rather than cpu cycles.
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
+ */
+static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+{
+ struct cpuset *parent;
+
+ /*
+ * The cgroup's css_sets list is in use if there are tasks
+ * in the cpuset; the list is empty if there are none;
+	 * the cs->css.refcnt always seems to be 0 here.
+ */
+ if (list_empty(&cs->css.cgroup->css_sets))
+ return;
+
+ /*
+	 * Find its next-highest non-empty parent (the top cpuset
+	 * always has online cpus, so it can't be empty).
+ */
+ parent = cs->parent;
+ while (cpus_empty(parent->cpus_allowed) ||
+ nodes_empty(parent->mems_allowed))
+ parent = parent->parent;
+
+ move_member_tasks_to_cpuset(cs, parent);
+}
+
+/*
+ * Walk the specified cpuset subtree and look for empty cpusets.
+ * The tasks of such a cpuset must be moved to a parent cpuset.
*
- * Call with both manage_mutex and callback_mutex held.
+ * Called with cgroup_mutex held. We take callback_mutex to modify
+ * cpus_allowed and mems_allowed.
*
- * Recursive, on depth of cpuset subtree.
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
+ *
+ * For now, since we lack memory hot unplug, we'll never see a cpuset
+ * that has tasks along with an empty 'mems'. But if we did see such
+ * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
*/
-
-static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
+static void scan_for_empty_cpusets(const struct cpuset *root)
{
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+ struct list_head queue;
struct cgroup *cont;
- struct cpuset *c;
- /* Each of our child cpusets mems must be online */
- list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
- c = cgroup_cs(cont);
- guarantee_online_cpus_mems_in_subtree(c);
- if (!cpus_empty(c->cpus_allowed))
- guarantee_online_cpus(c, &c->cpus_allowed);
- if (!nodes_empty(c->mems_allowed))
- guarantee_online_mems(c, &c->mems_allowed);
+ INIT_LIST_HEAD(&queue);
+
+ list_add_tail((struct list_head *)&root->stack_list, &queue);
+
+ while (!list_empty(&queue)) {
+ cp = container_of(queue.next, struct cpuset, stack_list);
+ list_del(queue.next);
+ list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+ child = cgroup_cs(cont);
+ list_add_tail(&child->stack_list, &queue);
+ }
+ cont = cp->css.cgroup;
+
+ /* Continue past cpusets with all cpus, mems online */
+ if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
+ nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
+ continue;
+
+ /* Remove offline cpus and mems from this cpuset. */
+ mutex_lock(&callback_mutex);
+ cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+ nodes_and(cp->mems_allowed, cp->mems_allowed,
+ node_states[N_HIGH_MEMORY]);
+ mutex_unlock(&callback_mutex);
+
+ /* Move tasks from the empty cpuset to a parent */
+ if (cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))
+ remove_tasks_in_empty_cpuset(cp);
}
}
/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
* cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
- * track what's online after any CPU or memory node hotplug or unplug
- * event.
- *
- * To ensure that we don't remove a CPU or node from the top cpuset
- * that is currently in use by a child cpuset (which would violate
- * the rule that cpusets must be subsets of their parent), we first
- * call the recursive routine guarantee_online_cpus_mems_in_subtree().
+ * track what's online after any CPU or memory node hotplug or unplug event.
*
* Since there are two callers of this routine, one for CPU hotplug
* events and one for memory node hotplug events, we could have coded
static void common_cpu_mem_hotplug_unplug(void)
{
cgroup_lock();
- mutex_lock(&callback_mutex);
- guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+ scan_for_empty_cpusets(&top_cpuset);
- mutex_unlock(&callback_mutex);
cgroup_unlock();
}
* cpu_online_map on each CPU hotplug (cpuhp) event.
*/
-static int cpuset_handle_cpuhp(struct notifier_block *nb,
- unsigned long phase, void *cpu)
+static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
+ unsigned long phase, void *unused_cpu)
{
if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
return NOTIFY_DONE;
cpumask_t mask;
mutex_lock(&callback_mutex);
+ mask = cpuset_cpus_allowed_locked(tsk);
+ mutex_unlock(&callback_mutex);
+
+ return mask;
+}
+
+/**
+ * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
+ * Must be called with callback_mutex held.
+ */
+cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+{
+ cpumask_t mask;
+
task_lock(tsk);
guarantee_online_cpus(task_cs(tsk), &mask);
task_unlock(tsk);
- mutex_unlock(&callback_mutex);
return mask;
}
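/*
 * Usage note (a sketch, not from the original source): callers that
 * already hold callback_mutex call cpuset_cpus_allowed_locked()
 * directly; everyone else uses cpuset_cpus_allowed() above, which wraps
 * the same helper in callback_mutex itself.
 */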
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
- * and we take manage_mutex, keeping attach_task() from changing it
- * anyway. No need to check that tsk->cpuset != NULL, thanks to
- * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks
- * cpuset to top_cpuset.
+ * and we take cgroup_mutex, keeping cpuset_attach() from changing it
+ * anyway.
*/
-static int proc_cpuset_show(struct seq_file *m, void *v)
+static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
struct pid *pid;
struct task_struct *tsk;
#endif /* CONFIG_PROC_PID_CPUSET */
/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
-char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
-{
- buffer += sprintf(buffer, "Cpus_allowed:\t");
- buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
- buffer += sprintf(buffer, "\n");
- buffer += sprintf(buffer, "Mems_allowed:\t");
- buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
- buffer += sprintf(buffer, "\n");
- return buffer;
+void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
+{
+ seq_printf(m, "Cpus_allowed:\t");
+ m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
+ task->cpus_allowed);
+ seq_printf(m, "\n");
+ seq_printf(m, "Mems_allowed:\t");
+ m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
+ task->mems_allowed);
+ seq_printf(m, "\n");
}
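/*
 * Example of the resulting /proc/<pid>/status lines (illustration only;
 * field widths depend on NR_CPUS and MAX_NUMNODES):
 *
 *	Cpus_allowed:	00000000,0000000f
 *	Mems_allowed:	00000000,00000003
 */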