#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
+#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
+/*
+ * Maximum length of a cpumask that can be specified in
+ * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
+ */
+#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)
+
static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;
static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
[TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
+ [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
+ [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
+
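+/*
+ * A listener registered for per-task exit data from a cpu, identified
+ * by its netlink pid; per-cpu lists of these live in listener_array.
+ */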
+struct listener {
+ struct list_head list;
+ pid_t pid;
+ char valid;
+};
+
+struct listener_list {
+ struct rw_semaphore sem;
+ struct list_head list;
};
+static DEFINE_PER_CPU(struct listener_list, listener_array);
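+
+/* Listener list actions */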
+enum actions {
+ REGISTER,
+ DEREGISTER,
+ CPU_DONT_CARE
+};
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
void **replyp, size_t size)
{
struct sk_buff *skb;
void *reply;

/*
* If new attributes are added, please revisit this allocation
*/
- skb = nlmsg_new(size);
+ skb = genlmsg_new(size, GFP_KERNEL);
if (!skb)
return -ENOMEM;
if (!info) {
int seq = get_cpu_var(taskstats_seqnum)++;
put_cpu_var(taskstats_seqnum);
- reply = genlmsg_put(skb, 0, seq,
- family.id, 0, 0,
- cmd, family.version);
+ reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
} else
- reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
- family.id, 0, 0,
- cmd, family.version);
+ reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
if (reply == NULL) {
nlmsg_free(skb);
return -EINVAL;
}

*skbp = skb;
*replyp = reply;
return 0;
}
-static int send_reply(struct sk_buff *skb, pid_t pid, int event)
+/*
+ * Send taskstats data in @skb to listener with nl_pid @pid
+ */
+static int send_reply(struct sk_buff *skb, pid_t pid)
{
struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
- void *reply;
+ void *reply = genlmsg_data(genlhdr);
int rc;
- reply = genlmsg_data(genlhdr);
-
rc = genlmsg_end(skb, reply);
if (rc < 0) {
nlmsg_free(skb);
return rc;
}
- if (event == TASKSTATS_MSG_MULTICAST)
- return genlmsg_multicast(skb, pid, TASKSTATS_LISTEN_GROUP);
return genlmsg_unicast(skb, pid);
}
-static int fill_pid(pid_t pid, struct task_struct *pidtsk,
+/*
+ * Send taskstats data in @skb to listeners registered for @cpu's exit data
+ */
+static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
+{
+ struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
+ struct listener_list *listeners;
+ struct listener *s, *tmp;
+ struct sk_buff *skb_next, *skb_cur = skb;
+ void *reply = genlmsg_data(genlhdr);
+ int rc, delcount = 0;
+
+ rc = genlmsg_end(skb, reply);
+ if (rc < 0) {
+ nlmsg_free(skb);
+ return;
+ }
+
+ rc = 0;
+ listeners = &per_cpu(listener_array, cpu);
+ down_read(&listeners->sem);
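+ /*
+ * Unicast the skb to every registered listener, cloning it for all
+ * but the last so each recipient gets its own copy.
+ */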
+ list_for_each_entry(s, &listeners->list, list) {
+ skb_next = NULL;
+ if (!list_is_last(&s->list, &listeners->list)) {
+ skb_next = skb_clone(skb_cur, GFP_KERNEL);
+ if (!skb_next)
+ break;
+ }
+ rc = genlmsg_unicast(skb_cur, s->pid);
+ if (rc == -ECONNREFUSED) {
+ s->valid = 0;
+ delcount++;
+ }
+ skb_cur = skb_next;
+ }
+ up_read(&listeners->sem);
+
+ if (skb_cur)
+ nlmsg_free(skb_cur);
+
+ if (!delcount)
+ return;
+
+ /* Delete invalidated entries */
+ down_write(&listeners->sem);
+ list_for_each_entry_safe(s, tmp, &listeners->list, list) {
+ if (!s->valid) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ }
+ up_write(&listeners->sem);
+}
+
+static int fill_pid(pid_t pid, struct task_struct *tsk,
struct taskstats *stats)
{
- int rc;
- struct task_struct *tsk = pidtsk;
+ int rc = 0;
- if (!pidtsk) {
- read_lock(&tasklist_lock);
+ if (!tsk) {
+ rcu_read_lock();
tsk = find_task_by_pid(pid);
- if (!tsk) {
- read_unlock(&tasklist_lock);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
return -ESRCH;
- }
- get_task_struct(tsk);
- read_unlock(&tasklist_lock);
} else
get_task_struct(tsk);
* Each accounting subsystem adds calls to its functions to
* fill in relevant parts of struct taskstats as follows
*
- * rc = per-task-foo(stats, tsk);
- * if (rc)
- * goto err;
+ * per-task-foo(stats, tsk);
*/
- rc = delayacct_add_tsk(stats, tsk);
+ delayacct_add_tsk(stats, tsk);
+
+ /* fill in basic acct fields */
stats->version = TASKSTATS_VERSION;
+ bacct_add_tsk(stats, tsk);
+
+ /* fill in extended acct fields */
+ xacct_add_tsk(stats, tsk);
/* Define err: label here if needed */
put_task_struct(tsk);
return rc;
}
-static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
+static int fill_tgid(pid_t tgid, struct task_struct *first,
struct taskstats *stats)
{
- struct task_struct *tsk, *first;
+ struct task_struct *tsk;
unsigned long flags;
+ int rc = -ESRCH;
/*
* Add additional stats from live tasks except zombie thread group
* leaders who are already counted with the dead tasks
*/
- first = tgidtsk;
- if (!first) {
- read_lock(&tasklist_lock);
+ rcu_read_lock();
+ if (!first)
first = find_task_by_pid(tgid);
- if (!first) {
- read_unlock(&tasklist_lock);
- return -ESRCH;
- }
- get_task_struct(first);
- read_unlock(&tasklist_lock);
- } else
- get_task_struct(first);
- /* Start with stats from dead tasks */
- spin_lock_irqsave(&first->signal->stats_lock, flags);
+ if (!first || !lock_task_sighand(first, &flags))
+ goto out;
+
if (first->signal->stats)
memcpy(stats, first->signal->stats, sizeof(*stats));
- spin_unlock_irqrestore(&first->signal->stats_lock, flags);
tsk = first;
- read_lock(&tasklist_lock);
do {
- if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
+ if (tsk->exit_state)
continue;
/*
* Accounting subsystem can call its functions here to
delayacct_add_tsk(stats, tsk);
} while_each_thread(first, tsk);
- read_unlock(&tasklist_lock);
- stats->version = TASKSTATS_VERSION;
+ unlock_task_sighand(first, &flags);
+ rc = 0;
+out:
+ rcu_read_unlock();
+
+ stats->version = TASKSTATS_VERSION;
/*
* Accounting subsystems can also add calls here to modify
* fields of taskstats.
*/
-
- return 0;
+ return rc;
}

static void fill_tgid_exit(struct task_struct *tsk)
{
unsigned long flags;
- spin_lock_irqsave(&tsk->signal->stats_lock, flags);
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
if (!tsk->signal->stats)
goto ret;
*/
delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
- spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
return;
}
-static int taskstats_send_stats(struct sk_buff *skb, struct genl_info *info)
+static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
+{
+ struct listener_list *listeners;
+ struct listener *s, *tmp;
+ unsigned int cpu;
+ cpumask_t mask = *maskp;
+
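+ /* Reject cpus that cannot exist on this system */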
+ if (!cpus_subset(mask, cpu_possible_map))
+ return -EINVAL;
+
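+ /* REGISTER: add an entry for the listener on each cpu in the mask */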
+ if (isadd == REGISTER) {
+ for_each_cpu_mask(cpu, mask) {
+ s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!s)
+ goto cleanup;
+ s->pid = pid;
+ INIT_LIST_HEAD(&s->list);
+ s->valid = 1;
+
+ listeners = &per_cpu(listener_array, cpu);
+ down_write(&listeners->sem);
+ list_add(&s->list, &listeners->list);
+ up_write(&listeners->sem);
+ }
+ return 0;
+ }
+ /* Deregister or cleanup */
+cleanup:
+ for_each_cpu_mask(cpu, mask) {
+ listeners = &per_cpu(listener_array, cpu);
+ down_write(&listeners->sem);
+ list_for_each_entry_safe(s, tmp, &listeners->list, list) {
+ if (s->pid == pid) {
+ list_del(&s->list);
+ kfree(s);
+ break;
+ }
+ }
+ up_write(&listeners->sem);
+ }
+ return 0;
+}
+
+static int parse(struct nlattr *na, cpumask_t *mask)
+{
+ char *data;
+ int len;
+ int ret;
+
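+ /* A missing attribute is not an error: return 1 so the caller can
+ * check the other command attribute */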
+ if (na == NULL)
+ return 1;
+ len = nla_len(na);
+ if (len > TASKSTATS_CPUMASK_MAXLEN)
+ return -E2BIG;
+ if (len < 1)
+ return -EINVAL;
+ data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ nla_strlcpy(data, na, len);
+ ret = cpulist_parse(data, *mask);
+ kfree(data);
+ return ret;
+}
+
+static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
int rc = 0;
struct sk_buff *rep_skb;
void *reply;
size_t size;
struct nlattr *na;
+ cpumask_t mask;
+
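+ /* Listener (de)registration requests are handled before stats queries */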
+ rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return add_del_listener(info->snd_pid, &mask, REGISTER);
+
+ rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return add_del_listener(info->snd_pid, &mask, DEREGISTER);
/*
* Size includes space for nested attributes
nla_nest_end(rep_skb, na);
- return send_reply(rep_skb, info->snd_pid, TASKSTATS_MSG_UNICAST);
+ return send_reply(rep_skb, info->snd_pid);
nla_put_failure:
- return genlmsg_cancel(rep_skb, reply);
+ rc = genlmsg_cancel(rep_skb, reply);
err:
nlmsg_free(rep_skb);
return rc;
}
+void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
+{
+ struct listener_list *listeners;
+ struct taskstats *tmp;
+ /*
+ * This is the cpu on which the task is exiting currently and will
+ * be the one for which the exit event is sent, even if the cpu
+ * on which this function is running changes later.
+ */
+ *mycpu = raw_smp_processor_id();
+
+ *ptidstats = NULL;
+ tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
+ if (!tmp)
+ return;
+
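+ /* Keep the allocation only if this cpu has registered listeners */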
+ listeners = &per_cpu(listener_array, *mycpu);
+ down_read(&listeners->sem);
+ if (!list_empty(&listeners->list)) {
+ *ptidstats = tmp;
+ tmp = NULL;
+ }
+ up_read(&listeners->sem);
+ kfree(tmp);
+}
+
/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
- int group_dead)
+ int group_dead, unsigned int mycpu)
{
int rc;
struct sk_buff *rep_skb;
size_t size;
int is_thread_group;
struct nlattr *na;
- unsigned long flags;
- if (!family_registered || !tidstats)
+ if (!family_registered)
return;
- spin_lock_irqsave(&tsk->signal->stats_lock, flags);
- is_thread_group = tsk->signal->stats ? 1 : 0;
- spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
-
- rc = 0;
/*
* Size includes space for nested attributes
*/
size = nla_total_size(sizeof(u32)) +
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
- if (is_thread_group)
- size = 2 * size; /* PID + STATS + TGID + STATS */
+ is_thread_group = (tsk->signal->stats != NULL);
+ if (is_thread_group) {
+ /* PID + STATS + TGID + STATS */
+ size = 2 * size;
+ /* fill the tsk->signal->stats structure */
+ fill_tgid_exit(tsk);
+ }
+
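+ /* No buffer was set up at exit time (no listeners or allocation failed) */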
+ if (!tidstats)
+ return;
rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
if (rc < 0)
goto send;
/*
- * tsk has/had a thread group so fill the tsk->signal->stats structure
* Doesn't matter if tsk is the leader or the last group member leaving
*/
-
- fill_tgid_exit(tsk);
if (!group_dead)
goto send;
nla_nest_end(rep_skb, na);
send:
- send_reply(rep_skb, 0, TASKSTATS_MSG_MULTICAST);
+ send_cpu_listeners(rep_skb, mycpu);
return;
nla_put_failure:
genlmsg_cancel(rep_skb, reply);
- goto ret;
err_skb:
nlmsg_free(rep_skb);
ret:
static struct genl_ops taskstats_ops = {
.cmd = TASKSTATS_CMD_GET,
- .doit = taskstats_send_stats,
+ .doit = taskstats_user_cmd,
.policy = taskstats_cmd_get_policy,
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
+ unsigned int i;
+
taskstats_cache = kmem_cache_create("taskstats_cache",
sizeof(struct taskstats),
0, SLAB_PANIC, NULL, NULL);
+ for_each_possible_cpu(i) {
+ INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
+ init_rwsem(&(per_cpu(listener_array, i).sem));
+ }
}
static int __init taskstats_init(void)