#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
+#include <linux/hash.h>
#include <asm/atomic.h>
* be called.
*/
static int need_forkexit_callback;
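+/* Nonzero if any registered subsystem provides an mm_owner_changed callback */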
+static int need_mm_owner_callback __read_mostly;
/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;
+/* hash table for cgroup groups. This improves the performance of
+ * looking up an existing css_set */
+#define CSS_SET_HASH_BITS 7
+#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS)
+static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
+
+static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
+{
+ int i;
+ int index;
+ unsigned long tmp = 0UL;
+
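+ /* Combine the subsystem state pointers into a single key and
+  * fold the high bits into the low bits before hashing. */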
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
+ tmp += (unsigned long)css[i];
+ tmp = (tmp >> 16) ^ tmp;
+
+ index = hash_long(tmp, CSS_SET_HASH_BITS);
+
+ return &css_set_table[index];
+}
+
/* We don't maintain the lists running through each css_set to its
* task until after the first call to cgroup_iter_start(). This
* reduces the fork()/exit() overhead for people who have cgroups
static void unlink_css_set(struct css_set *cg)
{
write_lock(&css_set_lock);
- list_del(&cg->list);
+ hlist_del(&cg->hlist);
css_set_count--;
while (!list_empty(&cg->cg_links)) {
struct cg_cgroup_link *link;
/*
* find_existing_css_set() is a helper for
* find_css_set(), and checks to see whether an existing
- * css_set is suitable. This currently walks a linked-list for
- * simplicity; a later patch will use a hash table for better
- * performance
+ * css_set is suitable.
*
* oldcg: the cgroup group that we're using before the cgroup
* transition
{
int i;
struct cgroupfs_root *root = cgrp->root;
- struct list_head *l = &init_css_set.list;
+ struct hlist_head *hhead;
+ struct hlist_node *node;
+ struct css_set *cg;
/* Build the set of subsystem state objects that we want to
* see in the new css_set */
}
}
- /* Look through existing cgroup groups to find one to reuse */
- do {
- struct css_set *cg =
- list_entry(l, struct css_set, list);
-
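+ /* Only the hash bucket that a matching css_set would occupy
+  * needs to be searched, not every css_set in the system. */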
+ hhead = css_set_hash(template);
+ hlist_for_each_entry(cg, node, hhead, hlist) {
if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
/* All subsystems matched */
return cg;
}
- /* Try the next cgroup group */
- l = l->next;
- } while (l != &init_css_set.list);
+ }
/* No existing cgroup group matched */
return NULL;
struct list_head tmp_cg_links;
struct cg_cgroup_link *link;
+ struct hlist_head *hhead;
+
/* First see if we already have a cgroup group that matches
* the desired set */
write_lock(&css_set_lock);
kref_init(&res->ref);
INIT_LIST_HEAD(&res->cg_links);
INIT_LIST_HEAD(&res->tasks);
+ INIT_HLIST_NODE(&res->hlist);
/* Copy the set of subsystem state objects generated in
* find_existing_css_set() */
BUG_ON(!list_empty(&tmp_cg_links));
- /* Link this cgroup group into the list */
- list_add(&res->list, &init_css_set.list);
css_set_count++;
+
+ /* Add this cgroup group to the hash table */
+ hhead = css_set_hash(res->subsys);
+ hlist_add_head(&res->hlist, hhead);
+
write_unlock(&css_set_lock);
return res;
static struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
- .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
int ret = 0;
struct super_block *sb;
struct cgroupfs_root *root;
- struct list_head tmp_cg_links, *l;
+ struct list_head tmp_cg_links;
INIT_LIST_HEAD(&tmp_cg_links);
/* First find the desired set of subsystems */
/* New superblock */
struct cgroup *cgrp = &root->top_cgroup;
struct inode *inode;
+ int i;
BUG_ON(sb->s_root != NULL);
/* Link the top cgroup in this hierarchy into all
* the css_set objects */
write_lock(&css_set_lock);
- l = &init_css_set.list;
- do {
+ for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
+ struct hlist_head *hhead = &css_set_table[i];
+ struct hlist_node *node;
struct css_set *cg;
- struct cg_cgroup_link *link;
- cg = list_entry(l, struct css_set, list);
- BUG_ON(list_empty(&tmp_cg_links));
- link = list_entry(tmp_cg_links.next,
- struct cg_cgroup_link,
- cgrp_link_list);
- list_del(&link->cgrp_link_list);
- link->cg = cg;
- list_add(&link->cgrp_link_list,
- &root->top_cgroup.css_sets);
- list_add(&link->cg_link_list, &cg->cg_links);
- l = l->next;
- } while (l != &init_css_set.list);
+
+ hlist_for_each_entry(cg, node, hhead, hlist) {
+ struct cg_cgroup_link *link;
+
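+ /* Consume one of the pre-allocated links in tmp_cg_links to
+  * connect this css_set to the hierarchy's top cgroup. */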
+ BUG_ON(list_empty(&tmp_cg_links));
+ link = list_entry(tmp_cg_links.next,
+ struct cg_cgroup_link,
+ cgrp_link_list);
+ list_del(&link->cgrp_link_list);
+ link->cg = cg;
+ list_add(&link->cgrp_link_list,
+ &root->top_cgroup.css_sets);
+ list_add(&link->cg_link_list, &cg->cg_links);
+ }
+ }
write_unlock(&css_set_lock);
free_cg_links(&tmp_cg_links);
return cft->write(cgrp, cft, file, buf, nbytes, ppos);
if (cft->write_u64 || cft->write_s64)
return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
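+ /* A trigger callback takes no data; a successful trigger is
+  * reported as having consumed the whole write. */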
+ if (cft->trigger) {
+ int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+ return ret ? ret : nbytes;
+ }
return -EINVAL;
}
{
struct cgroup_seqfile_state *state = m->private;
struct cftype *cft = state->cft;
- struct cgroup_map_cb cb = {
- .fill = cgroup_map_add,
- .state = m,
- };
- return cft->read_map(state->cgroup, cft, &cb);
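+ /* A seq_file-backed control file provides either read_map or
+  * read_seq_string; fall back to the latter when read_map is
+  * not set. */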
+ if (cft->read_map) {
+ struct cgroup_map_cb cb = {
+ .fill = cgroup_map_add,
+ .state = m,
+ };
+ return cft->read_map(state->cgroup, cft, &cb);
+ }
+ return cft->read_seq_string(state->cgroup, cft, m);
}
int cgroup_seqfile_release(struct inode *inode, struct file *file)
cft = __d_cft(file->f_dentry);
if (!cft)
return -ENODEV;
- if (cft->read_map) {
+ if (cft->read_map || cft->read_seq_string) {
struct cgroup_seqfile_state *state =
kzalloc(sizeof(*state), GFP_USER);
if (!state)
static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
- struct list_head *l;
printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
BUG_ON(IS_ERR(css));
init_cgroup_css(css, ss, dummytop);
- /* Update all cgroup groups to contain a subsys
+ /* Update the init_css_set to contain a subsys
* pointer to this state - since the subsystem is
- * newly registered, all tasks and hence all cgroup
- * groups are in the subsystem's top cgroup. */
- write_lock(&css_set_lock);
- l = &init_css_set.list;
- do {
- struct css_set *cg =
- list_entry(l, struct css_set, list);
- cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
- l = l->next;
- } while (l != &init_css_set.list);
- write_unlock(&css_set_lock);
-
- /* If this subsystem requested that it be notified with fork
- * events, we should send it one now for every process in the
- * system */
- if (ss->fork) {
- struct task_struct *g, *p;
-
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- ss->fork(ss, p);
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
- }
+ * newly registered, all tasks, and hence the
+ * init_css_set, are in the subsystem's top cgroup. */
+ init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
need_forkexit_callback |= ss->fork || ss->exit;
+ need_mm_owner_callback |= !!ss->mm_owner_changed;
+
+ /* At system boot, before all subsystems have been
+ * registered, no tasks have been forked, so we don't
+ * need to invoke fork callbacks here. */
+ BUG_ON(!list_empty(&init_task.tasks));
ss->active = 1;
}
int i;
kref_init(&init_css_set.ref);
kref_get(&init_css_set.ref);
- INIT_LIST_HEAD(&init_css_set.list);
INIT_LIST_HEAD(&init_css_set.cg_links);
INIT_LIST_HEAD(&init_css_set.tasks);
+ INIT_HLIST_NODE(&init_css_set.hlist);
css_set_count = 1;
init_cgroup_root(&rootnode);
list_add(&rootnode.root_list, &roots);
list_add(&init_css_set_link.cg_link_list,
&init_css_set.cg_links);
+ for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&css_set_table[i]);
+
for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
{
int err;
int i;
+ struct hlist_head *hhead;
err = bdi_init(&cgroup_backing_dev_info);
if (err)
cgroup_init_subsys(ss);
}
+ /* Add init_css_set to the hash table */
+ hhead = css_set_hash(init_css_set.subsys);
+ hlist_add_head(&init_css_set.hlist, hhead);
+
err = register_filesystem(&cgroup_fs_type);
if (err < 0)
goto out;
}
}
+#ifdef CONFIG_MM_OWNER
+/**
+ * cgroup_mm_owner_callbacks - run callbacks when the mm->owner changes
+ * @old: the task that was the owner of the mm
+ * @new: the task that is becoming the owner of the mm
+ *
+ * Called on every change to mm->owner. mm_init_owner() does not
+ * invoke this routine, since it assigns the mm->owner the first time
+ * and does not change it.
+ */
+void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
+{
+ struct cgroup *oldcgrp, *newcgrp;
+
+ if (need_mm_owner_callback) {
+ int i;
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ oldcgrp = task_cgroup(old, ss->subsys_id);
+ newcgrp = task_cgroup(new, ss->subsys_id);
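+ /* Skip subsystems for which the ownership change does not
+  * cross a cgroup boundary. */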
+ if (oldcgrp == newcgrp)
+ continue;
+ if (ss->mm_owner_changed)
+ ss->mm_owner_changed(ss, oldcgrp, newcgrp);
+ }
+ }
+}
+#endif /* CONFIG_MM_OWNER */
+
/**
* cgroup_post_fork - called on a new task after adding it to the task list
* @child: the task in question
cg = tsk->cgroups;
parent = task_cgroup(tsk, subsys->subsys_id);
- snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
+ snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);
/* Pin the hierarchy */
atomic_inc(&parent->root->sb->s_active);