pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 17 Feb 2009 22:27:39 +0000 (14:27 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 17 Feb 2009 22:27:39 +0000 (14:27 -0800)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, vm86: fix preemption bug
  x86, olpc: fix model detection without OFW
  x86, hpet: fix for LS21 + HPET = boot hang
  x86: CPA avoid repeated lazy mmu flush
  x86: warn if arch_flush_lazy_mmu_cpu is called in preemptible context
  x86/paravirt: make arch_flush_lazy_mmu/cpu disable preemption
  x86, pat: fix warn_on_once() while mapping 0-1MB range with /dev/mem
  x86/cpa: make sure cpa is safe to call in lazy mmu mode
  x86, ptrace, mm: fix double-free on race

arch/x86/include/asm/paravirt.h
arch/x86/kernel/hpet.c
include/linux/mm.h
mm/mlock.c

index c09a1412758431271633f1af7793d708d59a24f2,a660eceaa2734643145936c1e5d971961f8925ab..e299287e8e3396d95b3169c5a9e9a1086e662a1a
@@@ -1352,14 -1352,7 +1352,7 @@@ static inline void arch_leave_lazy_cpu_
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
  }
  
- static inline void arch_flush_lazy_cpu_mode(void)
- {
-       if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
-               arch_leave_lazy_cpu_mode();
-               arch_enter_lazy_cpu_mode();
-       }
- }
+ void arch_flush_lazy_cpu_mode(void);
  
  #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
  static inline void arch_enter_lazy_mmu_mode(void)
@@@ -1372,13 -1365,7 +1365,7 @@@ static inline void arch_leave_lazy_mmu_
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
  }
  
- static inline void arch_flush_lazy_mmu_mode(void)
- {
-       if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
-               arch_leave_lazy_mmu_mode();
-               arch_enter_lazy_mmu_mode();
-       }
- }
+ void arch_flush_lazy_mmu_mode(void);
  
  static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                unsigned long phys, pgprot_t flags)
@@@ -1402,7 -1389,6 +1389,7 @@@ static inline int __raw_spin_is_contend
  {
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
  }
 +#define __raw_spin_is_contended       __raw_spin_is_contended
  
  static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
  {
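
In both hunks above, the inline flush helpers are replaced by out-of-line declarations so the flush can disable preemption around the leave/re-enter sequence ("x86/paravirt: make arch_flush_lazy_mmu/cpu disable preemption" in the merge list). A minimal sketch of the matching out-of-line definition, assuming it lands in arch/x86/kernel/paravirt.c; the WARN_ON corresponds to the companion "warn if arch_flush_lazy_mmu_cpu is called in preemptible context" patch:

void arch_flush_lazy_mmu_mode(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		/* preempt_count() == 1 means only our own
		 * preempt_disable() is held, i.e. the caller was
		 * preemptible -- exactly the bug being warned about. */
		WARN_ON(preempt_count() == 1);
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}

The CPU-mode variant is identical with PARAVIRT_LAZY_CPU and the *_lazy_cpu_mode() helpers. Moving the functions out of line is what makes the preempt_disable()/preempt_enable() pair possible without dragging preemption internals into every user of paravirt.h.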
diff --combined arch/x86/kernel/hpet.c
index 388254f69a2ad13721f87797d7aa7790126655df,5c8da2c2c185e260c7a0fe88c0588c9626302b45..a00545fe5cdd1dae730fe18031b631bd1f57d8d5
@@@ -269,6 -269,8 +269,8 @@@ static void hpet_set_mode(enum clock_ev
                now = hpet_readl(HPET_COUNTER);
                cmp = now + (unsigned long) delta;
                cfg = hpet_readl(HPET_Tn_CFG(timer));
+               /* Make sure we use edge triggered interrupts */
+               cfg &= ~HPET_TN_LEVEL;
                cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
                       HPET_TN_SETVAL | HPET_TN_32BIT;
                hpet_writel(cfg, HPET_Tn_CFG(timer));
@@@ -897,21 -899,13 +899,21 @@@ static unsigned long hpet_rtc_flags
  static int hpet_prev_update_sec;
  static struct rtc_time hpet_alarm_time;
  static unsigned long hpet_pie_count;
 -static unsigned long hpet_t1_cmp;
 +static u32 hpet_t1_cmp;
  static unsigned long hpet_default_delta;
  static unsigned long hpet_pie_delta;
  static unsigned long hpet_pie_limit;
  
  static rtc_irq_handler irq_handler;
  
 +/*
 + * Check that the hpet counter c1 is ahead of c2
 + */
 +static inline int hpet_cnt_ahead(u32 c1, u32 c2)
 +{
 +      return (s32)(c2 - c1) < 0;
 +}
 +
  /*
   * Registers an IRQ handler.
   */
@@@ -1083,7 -1077,7 +1085,7 @@@ static void hpet_rtc_timer_reinit(void
                hpet_t1_cmp += delta;
                hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
                lost_ints++;
 -      } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
 +      } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
  
        if (lost_ints) {
                if (hpet_rtc_flags & RTC_PIE)
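
Two fixes land in hpet.c above: hpet_set_mode() now clears HPET_TN_LEVEL so the timer always uses edge-triggered interrupts (the LS21 boot-hang fix), and the comparator check in hpet_rtc_timer_reinit() switches to hpet_cnt_ahead(), which compares the free-running 32-bit counter in a wraparound-safe way, the same idiom as the kernel's time_after(). A standalone demonstration (not part of the patch) of why the signed-difference test is needed:

#include <stdio.h>
#include <stdint.h>

/* Same idiom as hpet_cnt_ahead(): c1 is "ahead of" c2 iff the
 * signed distance from c1 to c2 is negative. Correct across
 * 32-bit counter wraparound, unlike a plain ">" comparison. */
static int cnt_ahead(uint32_t c1, uint32_t c2)
{
	return (int32_t)(c2 - c1) < 0;
}

int main(void)
{
	/* Counter just wrapped: 0x00000002 was reached *after*
	 * 0xfffffffe, so it is logically ahead although it is
	 * numerically smaller. */
	printf("%d\n", cnt_ahead(0x00000002u, 0xfffffffeu)); /* prints 1 */
	printf("%d\n", cnt_ahead(0xfffffffeu, 0x00000002u)); /* prints 0 */
	return 0;
}

This is also why hpet_t1_cmp is narrowed from unsigned long to u32: on 64-bit kernels the old (long) subtraction never went negative, so the catch-up loop misjudged the comparator near a counter wraparound.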
diff --combined include/linux/mm.h
index 323561582c100bf1b18c3a3b6084eda970f89eeb,3d7fb44d7d7ec295886c9805a00da1ef34038ba3..7dc04ff5ab89ab12acc55201875f36f03b0477fa
@@@ -1129,7 -1129,8 +1129,7 @@@ extern unsigned long do_mmap_pgoff(stru
        unsigned long flag, unsigned long pgoff);
  extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, unsigned long flags,
 -      unsigned int vm_flags, unsigned long pgoff,
 -      int accountable);
 +      unsigned int vm_flags, unsigned long pgoff);
  
  static inline unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
@@@ -1304,5 -1305,6 +1304,6 @@@ void vmemmap_populate_print_last(void)
  
  extern void *alloc_locked_buffer(size_t size);
  extern void free_locked_buffer(void *buffer, size_t size);
+ extern void release_locked_buffer(void *buffer, size_t size);
  #endif /* __KERNEL__ */
  #endif /* _LINUX_MM_H */
diff --combined mm/mlock.c
index 037161d61b4e72432d9d6bae71507d251b522afc,2b57f7e603906aec66aa7f8bdfd0861fd04ccb8f..cbe9e0581b75dcaf06335ccc017a68789d6247d2
@@@ -311,10 -311,7 +311,10 @@@ long mlock_vma_pages_range(struct vm_ar
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
  
 -              return __mlock_vma_pages_range(vma, start, end, 1);
 +              __mlock_vma_pages_range(vma, start, end, 1);
 +
 +              /* Hide errors from mmap() and other callers */
 +              return 0;
        }
  
        /*
@@@ -660,7 -657,7 +660,7 @@@ void *alloc_locked_buffer(size_t size
        return buffer;
  }
  
- void free_locked_buffer(void *buffer, size_t size)
+ void release_locked_buffer(void *buffer, size_t size)
  {
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
  
        current->mm->locked_vm -= pgsz;
  
        up_write(&current->mm->mmap_sem);
+ }
+ 
+ void free_locked_buffer(void *buffer, size_t size)
+ {
+       release_locked_buffer(buffer, size);
  
        kfree(buffer);
  }
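
Splitting free_locked_buffer() this way lets a caller that has lost a race for ownership of the buffer undo only the locked_vm accounting without freeing the memory a second time, which is what the "x86, ptrace, mm: fix double-free on race" patch in this merge relies on. A hypothetical caller pattern (names invented for illustration):

	if (buffer_was_handed_off)
		/* Someone else now owns 'buf' and will kfree() it:
		 * drop only the mlock accounting. */
		release_locked_buffer(buf, size);
	else
		/* Still the sole owner: un-account and kfree(). */
		free_locked_buffer(buf, size);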