pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 6 Apr 2009 20:37:30 +0000 (13:37 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 6 Apr 2009 20:37:30 +0000 (13:37 -0700)
* 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: add stack dumps to asserts
  hrtimer: fix rq->lock inversion (again)

include/linux/interrupt.h
kernel/lockdep.c
kernel/sched.c
kernel/softirq.c

index ce2c07d99fc3a54934d140214cd46a0f7542937b,4528bf70866a24d320dad035c63d88e16c25d95b..8a9613d0c67401c2462df62ee9225bed14d00b53
@@@ -278,11 -278,6 +278,11 @@@ enu
        NR_SOFTIRQS
  };
  
 +/* map softirq index to softirq name. update 'softirq_to_name' in
 + * kernel/softirq.c when adding a new softirq.
 + */
 +extern char *softirq_to_name[NR_SOFTIRQS];
 +
  /* softirq mask and active fields moved to irq_cpustat_t in
   * asm/hardirq.h to get better cache usage.  KAO
   */
@@@ -299,6 -294,7 +299,7 @@@ extern void softirq_init(void)
  #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
  extern void raise_softirq_irqoff(unsigned int nr);
  extern void raise_softirq(unsigned int nr);
+ extern void wakeup_softirqd(void);
  
  /* This is the worklist that queues up per-cpu softirq work.
   *
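
The interrupt.h hunk above adds softirq_to_name[] and asks that the table be updated whenever a new softirq index is added. As a rough userspace sketch of that index-to-name pattern (the enum values and strings mirror the kernel's, but the size check and main() are purely illustrative, not kernel code), a build-time check can catch a table that drifts out of sync with the enum; note the kernel declares the array with an explicit NR_SOFTIRQS bound, while the sketch leaves it unsized so the check has something to verify:

#include <stdio.h>

/* Illustrative mirror of the kernel's softirq indices. */
enum softirq_index {
        HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ,
        BLOCK_SOFTIRQ, TASKLET_SOFTIRQ, SCHED_SOFTIRQ, HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ, NR_SOFTIRQS
};

static const char *softirq_to_name[] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/* Refuses to compile if the table and the enum ever disagree in length. */
typedef char softirq_name_table_size_check[
        (sizeof(softirq_to_name) / sizeof(softirq_to_name[0]) == NR_SOFTIRQS) ? 1 : -1];

int main(void)
{
        for (int i = 0; i < NR_SOFTIRQS; i++)
                printf("softirq %d -> %s\n", i, softirq_to_name[i]);
        return 0;
}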
diff --combined kernel/lockdep.c
index 81b5f33970b8b8c64e07414679a6ab5822fa44fe,a288ae107b50b24101e5ad193b8fdb4c5aa8a44d..b0f011866969acb7f187c3474e53551723d0486a
@@@ -42,7 -42,6 +42,7 @@@
  #include <linux/hash.h>
  #include <linux/ftrace.h>
  #include <linux/stringify.h>
 +#include <trace/lockdep.h>
  
  #include <asm/sections.h>
  
@@@ -793,6 -792,7 +793,7 @@@ register_lock_class(struct lockdep_map 
  
                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
@@@ -856,6 -856,7 +857,7 @@@ static struct lock_list *alloc_list_ent
  
                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return NULL;
        }
        return list_entries + nr_list_entries++;
@@@ -1682,6 -1683,7 +1684,7 @@@ cache_hit
  
                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
        chain = lock_chains + nr_lock_chains++;
@@@ -2541,6 -2543,7 +2544,7 @@@ static int __lock_acquire(struct lockde
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
  
                debug_locks_off();
                printk("BUG: MAX_LOCK_DEPTH too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
  
@@@ -2924,8 -2928,6 +2929,8 @@@ void lock_set_class(struct lockdep_map 
  }
  EXPORT_SYMBOL_GPL(lock_set_class);
  
 +DEFINE_TRACE(lock_acquire);
 +
  /*
   * We are not always called with irqs disabled - do that here,
   * and also avoid lockdep recursion:
@@@ -2936,8 -2938,6 +2941,8 @@@ void lock_acquire(struct lockdep_map *l
  {
        unsigned long flags;
  
 +      trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 +
        if (unlikely(current->lockdep_recursion))
                return;
  
  }
  EXPORT_SYMBOL_GPL(lock_acquire);
  
 +DEFINE_TRACE(lock_release);
 +
  void lock_release(struct lockdep_map *lock, int nested,
                          unsigned long ip)
  {
        unsigned long flags;
  
 +      trace_lock_release(lock, nested, ip);
 +
        if (unlikely(current->lockdep_recursion))
                return;
  
@@@ -3109,14 -3105,10 +3114,14 @@@ found_it
        lock->ip = ip;
  }
  
 +DEFINE_TRACE(lock_contended);
 +
  void lock_contended(struct lockdep_map *lock, unsigned long ip)
  {
        unsigned long flags;
  
 +      trace_lock_contended(lock, ip);
 +
        if (unlikely(!lock_stat))
                return;
  
  }
  EXPORT_SYMBOL_GPL(lock_contended);
  
 +DEFINE_TRACE(lock_acquired);
 +
  void lock_acquired(struct lockdep_map *lock, unsigned long ip)
  {
        unsigned long flags;
  
 +      trace_lock_acquired(lock, ip);
 +
        if (unlikely(!lock_stat))
                return;
  
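
Every lockdep hunk above follows the same pattern: when one of the static tables or limits (MAX_LOCKDEP_KEYS, MAX_LOCKDEP_ENTRIES, MAX_LOCKDEP_CHAINS, MAX_LOCKDEP_SUBCLASSES, MAX_LOCK_DEPTH) overflows, a dump_stack() is now emitted alongside the "too low" message, so the report shows which call chain exhausted the table rather than only that it happened. A minimal userspace sketch of the same idea, using glibc's backtrace()/backtrace_symbols_fd() in place of the kernel's dump_stack() (the table, limit, and function names here are invented for illustration):

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTRIES 4   /* deliberately tiny, in the spirit of MAX_LOCKDEP_ENTRIES */

static int entries[MAX_ENTRIES];
static int nr_entries;

static int *alloc_entry(void)
{
        if (nr_entries >= MAX_ENTRIES) {
                void *frames[32];
                int n = backtrace(frames, 32);

                fprintf(stderr, "BUG: MAX_ENTRIES too low!\n");
                /* Print the call chain that exhausted the table. */
                backtrace_symbols_fd(frames, n, 2);
                return NULL;
        }
        return &entries[nr_entries++];
}

static void consumer(void)
{
        for (int i = 0; i <= MAX_ENTRIES; i++)
                if (!alloc_entry())
                        exit(1);
}

int main(void)
{
        consumer();
        return 0;
}

Built with -rdynamic the trace shows function names; without it, raw return addresses, which is still enough to locate the caller.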
diff --combined kernel/sched.c
index bec249885e174c398c5e71aef05194579c9418dc,63256e3ede2a3480cd9fe3b27122dea44171cb63..6cc1fd5d5072b69638c562d7e01697d4c9870684
@@@ -231,13 -231,20 +231,20 @@@ static void start_rt_bandwidth(struct r
  
        spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
+               unsigned long delta;
+               ktime_t soft, hard;
                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;
  
                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-               hrtimer_start_expires(&rt_b->rt_period_timer,
-                               HRTIMER_MODE_ABS);
+               soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+               hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+               delta = ktime_to_ns(ktime_sub(hard, soft));
+               __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+                               HRTIMER_MODE_ABS, 0);
        }
        spin_unlock(&rt_b->rt_runtime_lock);
  }
@@@ -1110,7 -1117,7 +1117,7 @@@ static void hrtick_start(struct rq *rq
        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
 -              __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
 +              __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
  }
@@@ -1146,7 -1153,8 +1153,8 @@@ static __init void init_hrtick(void
   */
  static void hrtick_start(struct rq *rq, u64 delay)
  {
-       hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+       __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+                       HRTIMER_MODE_REL, 0);
  }
  
  static inline void init_hrtick(void)
@@@ -3818,23 -3826,19 +3826,23 @@@ find_busiest_queue(struct sched_group *
   */
  #define MAX_PINNED_INTERVAL   512
  
 +/* Working cpumask for load_balance and load_balance_newidle. */
 +static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 +
  /*
   * Check this_cpu to ensure it is balanced within domain. Attempt to move
   * tasks if there is an imbalance.
   */
  static int load_balance(int this_cpu, struct rq *this_rq,
                        struct sched_domain *sd, enum cpu_idle_type idle,
 -                      int *balance, struct cpumask *cpus)
 +                      int *balance)
  {
        int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
        struct sched_group *group;
        unsigned long imbalance;
        struct rq *busiest;
        unsigned long flags;
 +      struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  
        cpumask_setall(cpus);
  
@@@ -3989,7 -3993,8 +3997,7 @@@ out
   * this_rq is locked.
   */
  static int
 -load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 -                      struct cpumask *cpus)
 +load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
  {
        struct sched_group *group;
        struct rq *busiest = NULL;
        int ld_moved = 0;
        int sd_idle = 0;
        int all_pinned = 0;
 +      struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  
        cpumask_setall(cpus);
  
@@@ -4138,6 -4142,10 +4146,6 @@@ static void idle_balance(int this_cpu, 
        struct sched_domain *sd;
        int pulled_task = 0;
        unsigned long next_balance = jiffies + HZ;
 -      cpumask_var_t tmpmask;
 -
 -      if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
 -              return;
  
        for_each_domain(this_cpu, sd) {
                unsigned long interval;
                if (sd->flags & SD_BALANCE_NEWIDLE)
                        /* If we've pulled tasks over stop searching: */
                        pulled_task = load_balance_newidle(this_cpu, this_rq,
 -                                                         sd, tmpmask);
 +                                                         sd);
  
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                 */
                this_rq->next_balance = next_balance;
        }
 -      free_cpumask_var(tmpmask);
  }
  
  /*
@@@ -4312,6 -4321,11 +4320,6 @@@ static void rebalance_domains(int cpu, 
        unsigned long next_balance = jiffies + 60*HZ;
        int update_next_balance = 0;
        int need_serialize;
 -      cpumask_var_t tmp;
 -
 -      /* Fails alloc?  Rebalancing probably not a priority right now. */
 -      if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
 -              return;
  
        for_each_domain(cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
                }
  
                if (time_after_eq(jiffies, sd->last_balance + interval)) {
 -                      if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
 +                      if (load_balance(cpu, rq, sd, idle, &balance)) {
                                /*
                                 * We've pulled tasks over so either we're no
                                 * longer idle, or one of our SMT siblings is
@@@ -4370,6 -4384,8 +4378,6 @@@ out
         */
        if (likely(update_next_balance))
                rq->next_balance = next_balance;
 -
 -      free_cpumask_var(tmp);
  }
  
  /*
@@@ -4773,7 -4789,10 +4781,7 @@@ void scheduler_tick(void
  #endif
  }
  
 -#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 -                              defined(CONFIG_PREEMPT_TRACER))
 -
 -static inline unsigned long get_parent_ip(unsigned long addr)
 +unsigned long get_parent_ip(unsigned long addr)
  {
        if (in_lock_functions(addr)) {
                addr = CALLER_ADDR2;
        return addr;
  }
  
 +#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 +                              defined(CONFIG_PREEMPT_TRACER))
 +
  void __kprobes add_preempt_count(int val)
  {
  #ifdef CONFIG_DEBUG_PREEMPT
@@@ -5188,17 -5204,11 +5196,17 @@@ void __wake_up_locked(wait_queue_head_
        __wake_up_common(q, mode, 1, 0, NULL);
  }
  
 +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
 +{
 +      __wake_up_common(q, mode, 1, 0, key);
 +}
 +
  /**
 - * __wake_up_sync - wake up threads blocked on a waitqueue.
 + * __wake_up_sync_key - wake up threads blocked on a waitqueue.
   * @q: the waitqueue
   * @mode: which threads
   * @nr_exclusive: how many wake-one or wake-many threads to wake up
 + * @key: opaque value to be passed to wakeup targets
   *
   * The sync wakeup differs that the waker knows that it will schedule
   * away soon, so while the target thread will be woken up, it will not
   *
   * On UP it can prevent extra preemption.
   */
 -void
 -__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
 +                      int nr_exclusive, void *key)
  {
        unsigned long flags;
        int sync = 1;
                sync = 0;
  
        spin_lock_irqsave(&q->lock, flags);
 -      __wake_up_common(q, mode, nr_exclusive, sync, NULL);
 +      __wake_up_common(q, mode, nr_exclusive, sync, key);
        spin_unlock_irqrestore(&q->lock, flags);
  }
 +EXPORT_SYMBOL_GPL(__wake_up_sync_key);
 +
 +/*
 + * __wake_up_sync - see __wake_up_sync_key()
 + */
 +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 +{
 +      __wake_up_sync_key(q, mode, nr_exclusive, NULL);
 +}
  EXPORT_SYMBOL_GPL(__wake_up_sync);    /* For internal use only */
  
  /**
@@@ -7720,7 -7721,7 +7728,7 @@@ cpu_to_core_group(int cpu, const struc
  {
        int group;
  
 -      cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
 +      cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
        group = cpumask_first(mask);
        if (sg)
                *sg = &per_cpu(sched_group_core, group).sg;
@@@ -7749,7 -7750,7 +7757,7 @@@ cpu_to_phys_group(int cpu, const struc
        cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
        group = cpumask_first(mask);
  #elif defined(CONFIG_SCHED_SMT)
 -      cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
 +      cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
        group = cpumask_first(mask);
  #else
        group = cpu;
@@@ -8092,7 -8093,7 +8100,7 @@@ static int __build_sched_domains(const 
                SD_INIT(sd, SIBLING);
                set_domain_attribute(sd, attr);
                cpumask_and(sched_domain_span(sd),
 -                          &per_cpu(cpu_sibling_map, i), cpu_map);
 +                          topology_thread_cpumask(i), cpu_map);
                sd->parent = p;
                p->child = sd;
                cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
        /* Set up CPU (sibling) groups */
        for_each_cpu(i, cpu_map) {
                cpumask_and(this_sibling_map,
 -                          &per_cpu(cpu_sibling_map, i), cpu_map);
 +                          topology_thread_cpumask(i), cpu_map);
                if (i != cpumask_first(this_sibling_map))
                        continue;
  
@@@ -8778,9 -8779,6 +8786,9 @@@ void __init sched_init(void
  #endif
  #ifdef CONFIG_USER_SCHED
        alloc_size *= 2;
 +#endif
 +#ifdef CONFIG_CPUMASK_OFFSTACK
 +      alloc_size += num_possible_cpus() * cpumask_size();
  #endif
        /*
         * As sched_init() is called before page_alloc is setup,
                ptr += nr_cpu_ids * sizeof(void **);
  #endif /* CONFIG_USER_SCHED */
  #endif /* CONFIG_RT_GROUP_SCHED */
 +#ifdef CONFIG_CPUMASK_OFFSTACK
 +              for_each_possible_cpu(i) {
 +                      per_cpu(load_balance_tmpmask, i) = (void *)ptr;
 +                      ptr += cpumask_size();
 +              }
 +#endif /* CONFIG_CPUMASK_OFFSTACK */
        }
  
  #ifdef CONFIG_SMP
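
Much of the sched.c diff drops the GFP_ATOMIC cpumask allocations in idle_balance() and rebalance_domains() in favour of a per-cpu scratch mask, load_balance_tmpmask, which load_balance() and load_balance_newidle() now pick up themselves; under CONFIG_CPUMASK_OFFSTACK the storage is carved out of the single bootmem allocation in sched_init(), so the balancing hot path never allocates and can no longer bail out for lack of memory. A hedged userspace sketch of that shape (one up-front block sliced into per-worker scratch buffers; the names, sizes and balance() stub are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for the kernel's per-cpu cpumask machinery. */
#define NR_WORKERS      4
#define MASK_BYTES      16

static unsigned char *worker_tmpmask[NR_WORKERS];

/* One up-front allocation, sliced per worker: the hot path never allocates. */
static int init_tmpmasks(void)
{
        unsigned char *block = calloc(NR_WORKERS, MASK_BYTES);

        if (!block)
                return -1;
        for (int i = 0; i < NR_WORKERS; i++)
                worker_tmpmask[i] = block + i * MASK_BYTES;
        return 0;
}

/* The balancing path just reuses its preallocated scratch mask. */
static void balance(int worker)
{
        unsigned char *mask = worker_tmpmask[worker];

        memset(mask, 0xff, MASK_BYTES);         /* cpumask_setall() equivalent */
        /* ... pick the busiest peer using 'mask' as scratch ... */
}

int main(void)
{
        if (init_tmpmasks())
                return 1;
        for (int i = 0; i < NR_WORKERS; i++)
                balance(i);
        puts("balanced without any hot-path allocation");
        return 0;
}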
diff --combined kernel/softirq.c
index d105a82543d001029218240da411abf4893b587f,accc85197c496940b587fb4b7037676f98f6fa6a..2fecefacdc5bc56917c13369420625a919e73c14
  #include <linux/freezer.h>
  #include <linux/kthread.h>
  #include <linux/rcupdate.h>
 +#include <linux/ftrace.h>
  #include <linux/smp.h>
  #include <linux/tick.h>
 +#include <trace/irq.h>
  
  #include <asm/irq.h>
  /*
@@@ -54,18 -52,13 +54,18 @@@ static struct softirq_action softirq_ve
  
  static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
  
 +char *softirq_to_name[NR_SOFTIRQS] = {
 +      "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
 +      "TASKLET", "SCHED", "HRTIMER",  "RCU"
 +};
 +
  /*
   * we cannot loop indefinitely here to avoid userspace starvation,
   * but we also don't want to introduce a worst case 1/HZ latency
   * to the pending events, so lets the scheduler to balance
   * the softirq load for us.
   */
- static inline void wakeup_softirqd(void)
+ void wakeup_softirqd(void)
  {
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);
@@@ -86,23 -79,13 +86,23 @@@ static void __local_bh_disable(unsigne
        WARN_ON_ONCE(in_irq());
  
        raw_local_irq_save(flags);
 -      add_preempt_count(SOFTIRQ_OFFSET);
 +      /*
 +       * The preempt tracer hooks into add_preempt_count and will break
 +       * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
 +       * is set and before current->softirq_enabled is cleared.
 +       * We must manually increment preempt_count here and manually
 +       * call the trace_preempt_off later.
 +       */
 +      preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
 +
 +      if (preempt_count() == SOFTIRQ_OFFSET)
 +              trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  }
  #else /* !CONFIG_TRACE_IRQFLAGS */
  static inline void __local_bh_disable(unsigned long ip)
@@@ -186,9 -169,6 +186,9 @@@ EXPORT_SYMBOL(local_bh_enable_ip)
   */
  #define MAX_SOFTIRQ_RESTART 10
  
 +DEFINE_TRACE(softirq_entry);
 +DEFINE_TRACE(softirq_exit);
 +
  asmlinkage void __do_softirq(void)
  {
        struct softirq_action *h;
@@@ -215,14 -195,12 +215,14 @@@ restart
                if (pending & 1) {
                        int prev_count = preempt_count();
  
 +                      trace_softirq_entry(h, softirq_vec);
                        h->action(h);
 -
 +                      trace_softirq_exit(h, softirq_vec);
                        if (unlikely(prev_count != preempt_count())) {
 -                              printk(KERN_ERR "huh, entered softirq %td %p"
 +                              printk(KERN_ERR "huh, entered softirq %td %s %p"
                                       "with preempt_count %08x,"
                                       " exited with %08x?\n", h - softirq_vec,
 +                                     softirq_to_name[h - softirq_vec],
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }
@@@ -518,7 -496,7 +518,7 @@@ static int __try_remote_softirq(struct 
                cp->flags = 0;
                cp->priv = softirq;
  
 -              __smp_call_function_single(cpu, cp);
 +              __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
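
In softirq.c, wakeup_softirqd() loses its static inline so it can be called from outside softirq.c (it is now declared in interrupt.h), __local_bh_disable() bumps preempt_count() by hand and defers trace_preempt_off() so the preempt tracer cannot call back into lockdep while the softirq state is inconsistent, and __do_softirq() now brackets each handler with softirq entry/exit tracepoints and names the offending vector via softirq_to_name when a handler exits with a different preempt count than it entered with. A small userspace model of that balanced-count check (the handlers, the fake count and the names are invented; the tracepoints are shown only as comments):

#include <stdio.h>

typedef void (*action_fn)(void);

static unsigned int fake_preempt_count;

static void good_handler(void)  { }
static void leaky_handler(void) { fake_preempt_count++; }       /* forgets to drop it */

static action_fn actions[]       = { good_handler, leaky_handler };
static const char *action_name[] = { "GOOD", "LEAKY" };

int main(void)
{
        for (int i = 0; i < 2; i++) {
                unsigned int prev = fake_preempt_count;

                /* trace_softirq_entry() would fire here */
                actions[i]();
                /* trace_softirq_exit() would fire here */

                if (prev != fake_preempt_count) {
                        /* Naming the handler is what makes the report actionable. */
                        fprintf(stderr,
                                "huh, entered handler %d %s with count %08x, exited with %08x?\n",
                                i, action_name[i], prev, fake_preempt_count);
                        fake_preempt_count = prev;      /* restore, as __do_softirq does */
                }
        }
        return 0;
}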