tracing: do not allow modifying the ftrace events via the event files
[linux-2.6-omap-h63xx.git] / kernel/trace/ftrace.c
index 157d4f68b0e07335e048840eecd302db797c53c8..d33d306bdcf4648ab9daabeabb803828c4864cb1 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
+#include <linux/hash.h>
 
 #include <asm/ftrace.h>
 
                        ftrace_kill();          \
        } while (0)
 
+/* hash bits for specific function selection */
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
@@ -217,10 +218,8 @@ static void ftrace_update_pid_func(void)
 {
        ftrace_func_t func;
 
-       mutex_lock(&ftrace_lock);
-
        if (ftrace_trace_function == ftrace_stub)
-               goto out;
+               return;
 
        func = ftrace_trace_function;
 
@@ -237,16 +236,30 @@ static void ftrace_update_pid_func(void)
 #else
        __ftrace_trace_function = func;
 #endif
-
- out:
-       mutex_unlock(&ftrace_lock);
 }
 
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
 #ifdef CONFIG_DYNAMIC_FTRACE
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_probe {
+       struct hlist_node       node;
+       struct ftrace_probe_ops *ops;
+       unsigned long           flags;
+       unsigned long           ip;
+       void                    *data;
+       struct rcu_head         rcu;
+};
+
+
 enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
@@ -442,8 +455,8 @@ static void ftrace_bug(int failed, unsigned long ip)
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
-       unsigned long ip, fl;
        unsigned long ftrace_addr;
+       unsigned long ip, fl;
 
        ftrace_addr = (unsigned long)FTRACE_ADDR;
 
@@ -512,9 +525,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
 static void ftrace_replace_code(int enable)
 {
-       int failed;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int failed;
 
        do_for_each_ftrace_rec(pg, rec) {
                /*
@@ -539,8 +552,11 @@ static void ftrace_replace_code(int enable)
                        if ((system_state == SYSTEM_BOOTING) ||
                            !core_kernel_text(rec->ip)) {
                                ftrace_free_rec(rec);
-                       } else
+                       } else {
                                ftrace_bug(failed, rec->ip);
+                               /* Stop processing */
+                               return;
+                       }
                }
        } while_for_each_ftrace_rec();
 }
@@ -562,6 +578,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
        return 1;
 }
 
+/*
+ * archs can override this function if they must do something
+ * before the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_prepare(void)
+{
+       return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * after the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_post_process(void)
+{
+       return 0;
+}
+
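/*
 * Illustrative sketch (not part of this diff): an architecture that
 * write-protects its kernel text could override the weak hooks above to
 * lift the protection before stop_machine() rewrites the mcount sites
 * and restore it afterwards.  set_kernel_text_rw() and
 * set_kernel_text_ro() are assumed arch-provided helpers here.
 */
int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_kernel_text_ro();
        return 0;
}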
 static int __ftrace_modify_code(void *data)
 {
        int *command = data;
@@ -584,7 +618,17 @@ static int __ftrace_modify_code(void *data)
 
 static void ftrace_run_update_code(int command)
 {
+       int ret;
+
+       ret = ftrace_arch_code_modify_prepare();
+       FTRACE_WARN_ON(ret);
+       if (ret)
+               return;
+
        stop_machine(__ftrace_modify_code, &command, NULL);
+
+       ret = ftrace_arch_code_modify_post_process();
+       FTRACE_WARN_ON(ret);
 }
 
 static ftrace_func_t saved_ftrace_func;
@@ -749,12 +793,14 @@ enum {
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
        FTRACE_ITER_PRINTALL    = (1 << 4),
+       FTRACE_ITER_HASH        = (1 << 5),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
        struct ftrace_page      *pg;
+       int                     hidx;
        int                     idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
@@ -762,18 +808,90 @@ struct ftrace_iterator {
        unsigned                filtered;
 };
 
+static void *
+t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       struct hlist_node *hnd = v;
+       struct hlist_head *hhd;
+
+       WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
+
+       (*pos)++;
+
+ retry:
+       if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+               return NULL;
+
+       hhd = &ftrace_func_hash[iter->hidx];
+
+       if (hlist_empty(hhd)) {
+               iter->hidx++;
+               hnd = NULL;
+               goto retry;
+       }
+
+       if (!hnd)
+               hnd = hhd->first;
+       else {
+               hnd = hnd->next;
+               if (!hnd) {
+                       iter->hidx++;
+                       goto retry;
+               }
+       }
+
+       return hnd;
+}
+
+static void *t_hash_start(struct seq_file *m, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       void *p = NULL;
+
+       iter->flags |= FTRACE_ITER_HASH;
+
+       return t_hash_next(m, p, pos);
+}
+
+static int t_hash_show(struct seq_file *m, void *v)
+{
+       struct ftrace_func_probe *rec;
+       struct hlist_node *hnd = v;
+       char str[KSYM_SYMBOL_LEN];
+
+       rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+       if (rec->ops->print)
+               return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       seq_printf(m, "%s:", str);
+
+       kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
+       seq_printf(m, "%s", str);
+
+       if (rec->data)
+               seq_printf(m, ":%p", rec->data);
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;
 
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_next(m, v, pos);
+
        (*pos)++;
 
        if (iter->flags & FTRACE_ITER_PRINTALL)
                return NULL;
 
-       mutex_lock(&ftrace_lock);
  retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
@@ -802,7 +920,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                        goto retry;
                }
        }
-       mutex_unlock(&ftrace_lock);
 
        return rec;
 }
@@ -812,6 +929,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
 
+       mutex_lock(&ftrace_lock);
        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
@@ -819,12 +937,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         */
        if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
                if (*pos > 0)
-                       return NULL;
+                       return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
                (*pos)++;
                return iter;
        }
 
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_start(m, pos);
+
        if (*pos > 0) {
                if (iter->idx < 0)
                        return p;
@@ -834,11 +955,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
        p = t_next(m, p, pos);
 
+       if (!p)
+               return t_hash_start(m, pos);
+
        return p;
 }
 
 static void t_stop(struct seq_file *m, void *p)
 {
+       mutex_unlock(&ftrace_lock);
 }
 
 static int t_show(struct seq_file *m, void *v)
@@ -847,6 +972,9 @@ static int t_show(struct seq_file *m, void *v)
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];
 
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_show(m, v);
+
        if (iter->flags & FTRACE_ITER_PRINTALL) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
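/*
 * For orientation (a sketch; the real table lives elsewhere in this
 * file): the callbacks above plug into seq_file in the usual way, so a
 * read of set_ftrace_filter walks the dyn_ftrace records through
 * t_start()/t_next() and then falls through to the probe hash via
 * t_hash_start()/t_hash_next(), all under ftrace_lock held from
 * t_start() to t_stop().
 */
static const struct seq_operations show_ftrace_seq_ops = {
        .start  = t_start,
        .next   = t_next,
        .stop   = t_stop,
        .show   = t_show,
};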
@@ -1106,14 +1234,15 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
 
 static void ftrace_match_records(char *buff, int len, int enable)
 {
-       char *search;
+       unsigned int search_len;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
+       unsigned long flag;
+       char *search;
        int type;
-       unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-       unsigned search_len;
        int not;
 
+       flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        type = ftrace_setup_glob(buff, len, &search, &not);
 
        search_len = strlen(search);
@@ -1161,14 +1290,16 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
 
 static void ftrace_match_module_records(char *buff, char *mod, int enable)
 {
-       char *search = buff;
+       unsigned search_len = 0;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
-       unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-       unsigned search_len = 0;
+       char *search = buff;
+       unsigned long flag;
        int not = 0;
 
+       flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+
        /* blank or '*' mean the same */
        if (strcmp(buff, "*") == 0)
                buff[0] = 0;
@@ -1245,6 +1376,239 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
+static void
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_func_probe *entry;
+       struct hlist_head *hhd;
+       struct hlist_node *n;
+       unsigned long key;
+       int resched;
+
+       key = hash_long(ip, FTRACE_HASH_BITS);
+
+       hhd = &ftrace_func_hash[key];
+
+       if (hlist_empty(hhd))
+               return;
+
+       /*
+        * Disable preemption for these calls to prevent a RCU grace
+        * period. This syncs the hash iteration and freeing of items
+        * on the hash. rcu_read_lock is too dangerous here.
+        */
+       resched = ftrace_preempt_disable();
+       hlist_for_each_entry_rcu(entry, n, hhd, node) {
+               if (entry->ip == ip)
+                       entry->ops->func(ip, parent_ip, &entry->data);
+       }
+       ftrace_preempt_enable(resched);
+}
+
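/*
 * Note on the locking choice above (explanatory, not new code): this
 * callback runs from the mcount hook for every traced function, so
 * calling rcu_read_lock() here could recurse into traced RCU code.
 * Disabling preemption is sufficient because the removal path frees
 * entries with call_rcu(), whose grace period cannot end while any CPU
 * sits in this preempt-disabled section.
 */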
+static struct ftrace_ops trace_probe_ops __read_mostly =
+{
+       .func = function_trace_probe_call,
+};
+
+static int ftrace_probe_registered;
+
+static void __enable_ftrace_function_probe(void)
+{
+       int i;
+
+       if (ftrace_probe_registered)
+               return;
+
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+               if (hhd->first)
+                       break;
+       }
+       /* Nothing registered? */
+       if (i == FTRACE_FUNC_HASHSIZE)
+               return;
+
+       __register_ftrace_function(&trace_probe_ops);
+       ftrace_startup(0);
+       ftrace_probe_registered = 1;
+}
+
+static void __disable_ftrace_function_probe(void)
+{
+       int i;
+
+       if (!ftrace_probe_registered)
+               return;
+
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+               if (hhd->first)
+                       return;
+       }
+
+       /* no more funcs left */
+       __unregister_ftrace_function(&trace_probe_ops);
+       ftrace_shutdown(0);
+       ftrace_probe_registered = 0;
+}
+
+
+static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+{
+       struct ftrace_func_probe *entry =
+               container_of(rhp, struct ftrace_func_probe, rcu);
+
+       if (entry->ops->free)
+               entry->ops->free(&entry->data);
+       kfree(entry);
+}
+
+
+int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                             void *data)
+{
+       struct ftrace_func_probe *entry;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int type, len, not;
+       unsigned long key;
+       int count = 0;
+       char *search;
+
+       type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+       len = strlen(search);
+
+       /* we do not support '!' for function probes */
+       if (WARN_ON(not))
+               return -EINVAL;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+
+               if (!ftrace_match_record(rec, search, len, type))
+                       continue;
+
+               entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry) {
+                       /* If we did not process any, then return error */
+                       if (!count)
+                               count = -ENOMEM;
+                       goto out_unlock;
+               }
+
+               count++;
+
+               entry->data = data;
+
+               /*
+                * The caller might want to do something special
+                * for each function we find. We call the callback
+                * to give the caller an opportunity to do so.
+                */
+               if (ops->callback) {
+                       if (ops->callback(rec->ip, &entry->data) < 0) {
+                               /* caller does not like this func */
+                               kfree(entry);
+                               continue;
+                       }
+               }
+
+               entry->ops = ops;
+               entry->ip = rec->ip;
+
+               key = hash_long(entry->ip, FTRACE_HASH_BITS);
+               hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+
+       } while_for_each_ftrace_rec();
+       __enable_ftrace_function_probe();
+
+ out_unlock:
+       mutex_unlock(&ftrace_lock);
+
+       return count;
+}
+
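/*
 * Usage sketch (illustrative only; my_probe_func, my_probe_ops and the
 * "sched*" glob are made-up names): a caller fills in a
 * struct ftrace_probe_ops and attaches it to every function matching a
 * glob.  The optional .callback/.print/.free hooks are omitted here.
 */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
                          void **data)
{
        /* invoked from the function tracer each time a matched function runs */
}

static struct ftrace_probe_ops my_probe_ops = {
        .func = my_probe_func,
};

static int __init my_probe_init(void)
{
        int count;

        /* returns the number of functions hooked, or a negative errno */
        count = register_ftrace_function_probe("sched*", &my_probe_ops, NULL);
        return count < 0 ? count : 0;
}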
+enum {
+       PROBE_TEST_FUNC         = 1,
+       PROBE_TEST_DATA         = 2
+};
+
+static void
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                                 void *data, int flags)
+{
+       struct ftrace_func_probe *entry;
+       struct hlist_node *n, *tmp;
+       char str[KSYM_SYMBOL_LEN];
+       int type = MATCH_FULL;
+       int i, len = 0;
+       char *search;
+
+       if (glob && (!strcmp(glob, "*") || !strlen(glob)))
+               glob = NULL;
+       else {
+               int not;
+
+               type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+               len = strlen(search);
+
+               /* we do not support '!' for function probes */
+               if (WARN_ON(not))
+                       return;
+       }
+
+       mutex_lock(&ftrace_lock);
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+
+               hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+
+                       /* break up if statements for readability */
+                       if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
+                               continue;
+
+                       if ((flags & PROBE_TEST_DATA) && entry->data != data)
+                               continue;
+
+                       /* do this last, since it is the most expensive */
+                       if (glob) {
+                               kallsyms_lookup(entry->ip, NULL, NULL,
+                                               NULL, str);
+                               if (!ftrace_match(str, glob, len, type))
+                                       continue;
+                       }
+
+                       hlist_del(&entry->node);
+                       call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+               }
+       }
+       __disable_ftrace_function_probe();
+       mutex_unlock(&ftrace_lock);
+}
+
+void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                               void *data)
+{
+       __unregister_ftrace_function_probe(glob, ops, data,
+                                         PROBE_TEST_FUNC | PROBE_TEST_DATA);
+}
+
+void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
+{
+       __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
+}
+
+void unregister_ftrace_function_probe_all(char *glob)
+{
+       __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
+}
+
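/*
 * Matching teardown for the registration sketch above (illustrative):
 * removing by ops alone drops every entry installed through
 * my_probe_ops, regardless of its data pointer.
 */
static void __exit my_probe_exit(void)
{
        unregister_ftrace_function_probe_func("sched*", &my_probe_ops);
}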
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 
@@ -1288,8 +1652,8 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd)
 
 static int ftrace_process_regex(char *buff, int len, int enable)
 {
-       struct ftrace_func_command *p;
        char *func, *command, *next = buff;
+       struct ftrace_func_command *p;
        int ret = -EINVAL;
 
        func = strsep(&next, ":");
@@ -1500,21 +1864,21 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
        return ftrace_regex_release(inode, file, 0);
 }
 
-static struct file_operations ftrace_avail_fops = {
+static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
 };
 
-static struct file_operations ftrace_failures_fops = {
+static const struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
 };
 
-static struct file_operations ftrace_filter_fops = {
+static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
@@ -1522,7 +1886,7 @@ static struct file_operations ftrace_filter_fops = {
        .release = ftrace_filter_release,
 };
 
-static struct file_operations ftrace_notrace_fops = {
+static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
@@ -1557,6 +1921,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 
        mutex_lock(&graph_lock);
 
+       /* Nothing set; tell g_show to print that all functions are enabled */
+       if (!ftrace_graph_count && !*pos)
+               return (void *)1;
+
        p = g_next(m, p, pos);
 
        return p;
@@ -1575,6 +1943,11 @@ static int g_show(struct seq_file *m, void *v)
        if (!ptr)
                return 0;
 
+       if (ptr == (unsigned long *)1) {
+               seq_printf(m, "#### all functions enabled ####\n");
+               return 0;
+       }
+
        kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
 
        seq_printf(m, "%s\n", str);
@@ -1628,38 +2001,51 @@ ftrace_graph_read(struct file *file, char __user *ubuf,
 }
 
 static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
-       char str[KSYM_SYMBOL_LEN];
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int search_len;
        int found = 0;
-       int j;
+       int type, not;
+       char *search;
+       bool exists;
+       int i;
 
        if (ftrace_disabled)
                return -ENODEV;
 
+       /* decode regex */
+       type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+       if (not)
+               return -EINVAL;
+
+       search_len = strlen(search);
+
        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
 
+               if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                       break;
+
                if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
                        continue;
 
-               kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-               if (strcmp(str, buffer) == 0) {
-                       /* Return 1 if we add it to the array */
-                       found = 1;
-                       for (j = 0; j < idx; j++)
-                               if (array[j] == rec->ip) {
-                                       found = 0;
+               if (ftrace_match_record(rec, search, search_len, type)) {
+                       /* ensure it is not already in the array */
+                       exists = false;
+                       for (i = 0; i < *idx; i++)
+                               if (array[i] == rec->ip) {
+                                       exists = true;
                                        break;
                                }
-                       if (found)
-                               array[idx] = rec->ip;
-                       goto out;
+                       if (!exists) {
+                               array[(*idx)++] = rec->ip;
+                               found = 1;
+                       }
                }
        } while_for_each_ftrace_rec();
- out:
+
        mutex_unlock(&ftrace_lock);
 
        return found ? 0 : -EINVAL;
@@ -1728,13 +2114,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
        }
        buffer[index] = 0;
 
-       /* we allow only one at a time */
-       ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+       /* we allow only one expression at a time */
+       ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
        if (ret)
                goto out;
 
-       ftrace_graph_count++;
-
        file->f_pos += read;
 
        ret = read;
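/*
 * Net effect of the two hunks above (explanatory, not new code):
 * set_graph_function now takes the same glob syntax as
 * set_ftrace_filter and appends up to FTRACE_GRAPH_MAX_FUNCS matches
 * per write, e.g.
 *
 *      echo 'sched*' > /sys/kernel/debug/tracing/set_graph_function
 *
 * instead of requiring one exact symbol name at a time.
 */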
@@ -2034,7 +2418,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static struct file_operations ftrace_pid_fops = {
+static const struct file_operations ftrace_pid_fops = {
        .read = ftrace_pid_read,
        .write = ftrace_pid_write,
 };
@@ -2057,7 +2441,6 @@ static __init int ftrace_init_debugfs(void)
                           "'set_ftrace_pid' entry\n");
        return 0;
 }
-
 fs_initcall(ftrace_init_debugfs);
 
 /**
@@ -2227,7 +2610,7 @@ free:
 static int start_graph_tracing(void)
 {
        struct ftrace_ret_stack **ret_stack_list;
-       int ret;
+       int ret, cpu;
 
        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                sizeof(struct ftrace_ret_stack *),
@@ -2236,6 +2619,10 @@ static int start_graph_tracing(void)
        if (!ret_stack_list)
                return -ENOMEM;
 
+       /* The cpu_boot init_task->ret_stack will never be freed */
+       for_each_online_cpu(cpu)
+               ftrace_graph_init_task(idle_task(cpu));
+
        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);