diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 708d4882c0c..c1d76552446 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -8,6 +8,7 @@
  */
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
@@ -15,13 +16,16 @@
 #include <linux/notifier.h>
 #include <linux/module.h>
 
+#include <asm/irq_regs.h>
+
 static DEFINE_SPINLOCK(print_lock);
 
 static DEFINE_PER_CPU(unsigned long, touch_timestamp);
 static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
-static int did_panic = 0;
+static int __read_mostly did_panic;
+unsigned long __read_mostly softlockup_thresh = 60;
 
 static int softlock_panic(struct notifier_block *this, unsigned long event,
 			  void *ptr)
@@ -40,14 +44,16 @@ static struct notifier_block panic_block = {
  * resolution, and we don't need to waste time with a big divide when
  * 2^30ns == 1.074s.
  */
-static unsigned long get_timestamp(void)
+static unsigned long get_timestamp(int this_cpu)
 {
-	return sched_clock() >> 30; /* 2^30 ~= 10^9 */
+	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(touch_timestamp) = get_timestamp();
+	int this_cpu = raw_smp_processor_id();
+
+	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -70,6 +76,7 @@ void softlockup_tick(void)
 	int this_cpu = smp_processor_id();
 	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
 	unsigned long print_timestamp;
+	struct pt_regs *regs = get_irq_regs();
 	unsigned long now;
 
 	if (touch_timestamp == 0) {
@@ -80,10 +87,11 @@ void softlockup_tick(void)
 	print_timestamp = per_cpu(print_timestamp, this_cpu);
 
 	/* report at most once a second */
-	if (print_timestamp < (touch_timestamp + 1) ||
-		did_panic ||
-			!per_cpu(watchdog_task, this_cpu))
+	if ((print_timestamp >= touch_timestamp &&
+			print_timestamp < (touch_timestamp + 1)) ||
+			did_panic || !per_cpu(watchdog_task, this_cpu)) {
 		return;
+	}
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -91,30 +99,113 @@ void softlockup_tick(void)
 		return;
 	}
 
-	now = get_timestamp();
+	now = get_timestamp(this_cpu);
 
-	/* Wake up the high-prio watchdog task every second: */
-	if (now > (touch_timestamp + 1))
-		wake_up_process(per_cpu(watchdog_task, this_cpu));
+	/* Warn about unreasonable delays: */
+	if (now <= (touch_timestamp + softlockup_thresh))
+		return;
 
-	/* Warn about unreasonable 10+ seconds delays: */
-	if (now > (touch_timestamp + 10)) {
-		per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+	per_cpu(print_timestamp, this_cpu) = touch_timestamp;
 
-		spin_lock(&print_lock);
-		printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
-			this_cpu);
+	spin_lock(&print_lock);
+	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
[%s:%d]\n", + this_cpu, now - touch_timestamp, + current->comm, task_pid_nr(current)); + if (regs) + show_regs(regs); + else dump_stack(); - spin_unlock(&print_lock); + spin_unlock(&print_lock); +} + +/* + * Have a reasonable limit on the number of tasks checked: + */ +unsigned long __read_mostly sysctl_hung_task_check_count = 1024; + +/* + * Zero means infinite timeout - no checking done: + */ +unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; + +unsigned long __read_mostly sysctl_hung_task_warnings = 10; + +/* + * Only do the hung-tasks check on one CPU: + */ +static int check_cpu __read_mostly = -1; + +static void check_hung_task(struct task_struct *t, unsigned long now) +{ + unsigned long switch_count = t->nvcsw + t->nivcsw; + + if (t->flags & PF_FROZEN) + return; + + if (switch_count != t->last_switch_count || !t->last_switch_timestamp) { + t->last_switch_count = switch_count; + t->last_switch_timestamp = now; + return; } + if ((long)(now - t->last_switch_timestamp) < + sysctl_hung_task_timeout_secs) + return; + if (sysctl_hung_task_warnings < 0) + return; + sysctl_hung_task_warnings--; + + /* + * Ok, the task did not get scheduled for more than 2 minutes, + * complain: + */ + printk(KERN_ERR "INFO: task %s:%d blocked for more than " + "%ld seconds.\n", t->comm, t->pid, + sysctl_hung_task_timeout_secs); + printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" + " disables this message.\n"); + sched_show_task(t); + __debug_show_held_locks(t); + + t->last_switch_timestamp = now; + touch_nmi_watchdog(); +} + +/* + * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for + * a really long time (120 seconds). If that happens, print out + * a warning. + */ +static void check_hung_uninterruptible_tasks(int this_cpu) +{ + int max_count = sysctl_hung_task_check_count; + unsigned long now = get_timestamp(this_cpu); + struct task_struct *g, *t; + + /* + * If the system crashed already then all bets are off, + * do not report extra hung tasks: + */ + if ((tainted & TAINT_DIE) || did_panic) + return; + + read_lock(&tasklist_lock); + do_each_thread(g, t) { + if (!--max_count) + break; + if (t->state & TASK_UNINTERRUPTIBLE) + check_hung_task(t, now); + } while_each_thread(g, t); + + read_unlock(&tasklist_lock); } /* * The watchdog thread - runs every second and touches the timestamp. */ -static int watchdog(void * __bind_cpu) +static int watchdog(void *__bind_cpu) { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + int this_cpu = (long)__bind_cpu; sched_setscheduler(current, SCHED_FIFO, ¶m); @@ -123,13 +214,18 @@ static int watchdog(void * __bind_cpu) /* * Run briefly once per second to reset the softlockup timestamp. - * If this gets delayed for more than 10 seconds then the + * If this gets delayed for more than 60 seconds then the * debug-printout triggers in softlockup_tick(). 
 	 */
 	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
 		touch_softlockup_watchdog();
-		schedule();
+		msleep_interruptible(10000);
+
+		if (this_cpu != check_cpu)
+			continue;
+
+		if (sysctl_hung_task_timeout_secs)
+			check_hung_uninterruptible_tasks(this_cpu);
 	}
 
 	return 0;
@@ -150,15 +246,16 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(per_cpu(watchdog_task, hotcpu));
 		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
 		if (IS_ERR(p)) {
-			printk("watchdog for %i failed\n", hotcpu);
+			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-  		per_cpu(touch_timestamp, hotcpu) = 0;
-  		per_cpu(watchdog_task, hotcpu) = p;
+		per_cpu(touch_timestamp, hotcpu) = 0;
+		per_cpu(watchdog_task, hotcpu) = p;
 		kthread_bind(p, hotcpu);
-  		break;
+		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		check_cpu = any_online_cpu(cpu_online_map);
 		wake_up_process(per_cpu(watchdog_task, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
@@ -169,6 +266,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		if (hotcpu == check_cpu) {
+			cpumask_t temp_cpu_online_map = cpu_online_map;
+
+			cpu_clear(hotcpu, temp_cpu_online_map);
+			check_cpu = any_online_cpu(temp_cpu_online_map);
+		}
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		p = per_cpu(watchdog_task, hotcpu);
@@ -176,7 +282,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_stop(p);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
- 	}
+	}
 
 	return NOTIFY_OK;
 }
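
For reference, the soft-lockup warning path added above can be exercised with a small test module that busy-spins with preemption disabled for longer than softlockup_thresh: the per-CPU watchdog thread can then never run, so touch_timestamp goes stale while softlockup_tick() keeps firing from the timer interrupt. This is a minimal sketch against a 2.6.24-era tree matching this patch; the module name and the 65-second spin are illustrative assumptions, not part of the patch:

/* softlockup_test.c - hypothetical test module, for illustration only */
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>

static int __init softlockup_test_init(void)
{
	/* Spin past the 60-second default of softlockup_thresh. */
	unsigned long end = jiffies + 65 * HZ;

	preempt_disable();	/* keep the watchdog thread off this CPU */
	while (time_before(jiffies, end))
		cpu_relax();	/* never schedule(): touch_timestamp stays stale */
	preempt_enable();

	return 0;
}

static void __exit softlockup_test_exit(void)
{
}

module_init(softlockup_test_init);
module_exit(softlockup_test_exit);
MODULE_LICENSE("GPL");

While the loop spins, interrupts stay enabled, so jiffies still advances and softlockup_tick() compares now against the stale touch_timestamp; once the threshold passes it prints the "BUG: soft lockup - CPU#N stuck for Ns!" message, with registers from get_irq_regs() when available. The separate hung-task detector can be tuned or disabled at run time, e.g. via "echo 0 > /proc/sys/kernel/hung_task_timeout_secs", as the patch's own printk suggests.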