diff --git a/mm/memory.c b/mm/memory.c
index 122d965e820fe4420281ade76697c5b0d7d0cac4..3f8fa06b963b281191ac2c1350af0b21b2dad7c5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -52,6 +52,9 @@
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
+#include <linux/kallsyms.h>
+#include <linux/swapops.h>
+#include <linux/elf.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -59,9 +62,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
-#include <linux/swapops.h>
-#include <linux/elf.h>
-
 #include "internal.h"
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -375,15 +375,65 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
-                         unsigned long vaddr)
-{
-       printk(KERN_ERR "Bad pte = %08llx, process = %s, "
-                       "vm_flags = %lx, vaddr = %lx\n",
-               (long long)pte_val(pte),
-               (vma->vm_mm == current->mm ? current->comm : "???"),
-               vma->vm_flags, vaddr);
+static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+                         pte_t pte, struct page *page)
+{
+       pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+       struct address_space *mapping;
+       pgoff_t index;
+       static unsigned long resume;
+       static unsigned long nr_shown;
+       static unsigned long nr_unshown;
+
+       /*
+        * Allow a burst of 60 reports, then keep quiet for that minute;
+        * or allow a steady drip of one report per second.
+        */
+       if (nr_shown == 60) {
+               if (time_before(jiffies, resume)) {
+                       nr_unshown++;
+                       return;
+               }
+               if (nr_unshown) {
+                       printk(KERN_ALERT
+                               "BUG: Bad page map: %lu messages suppressed\n",
+                               nr_unshown);
+                       nr_unshown = 0;
+               }
+               nr_shown = 0;
+       }
+       if (nr_shown++ == 0)
+               resume = jiffies + 60 * HZ;
+
+       mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
+       index = linear_page_index(vma, addr);
+
+       printk(KERN_ALERT
+               "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
+               current->comm,
+               (long long)pte_val(pte), (long long)pmd_val(*pmd));
+       if (page) {
+               printk(KERN_ALERT
+               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
+               page, (void *)page->flags, page_count(page),
+               page_mapcount(page), page->mapping, page->index);
+       }
+       printk(KERN_ALERT
+               "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+               (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
+       /*
+        * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+        */
+       if (vma->vm_ops)
+               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
+                               (unsigned long)vma->vm_ops->fault);
+       if (vma->vm_file && vma->vm_file->f_op)
+               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
+                               (unsigned long)vma->vm_file->f_op->mmap);
        dump_stack();
+       add_taint(TAINT_BAD_PAGE);
 }
 
 static inline int is_cow_mapping(unsigned int flags)
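
The rate limiting in print_bad_pte() above allows a burst of up to 60 reports, then suppresses (and counts) further ones until a minute after the burst began, which averages out to one report per second. A minimal stand-alone sketch of the same policy in user space; report_bad_entry() and the one-second time() granularity are illustrative, not kernel code:

#include <stdio.h>
#include <time.h>

static void report_bad_entry(const char *what)
{
	static time_t resume;
	static unsigned long nr_shown, nr_unshown;
	time_t now = time(NULL);

	if (nr_shown == 60) {
		if (now < resume) {		/* still inside the quiet minute */
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printf("%lu messages suppressed\n", nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)			/* first of a new burst: stay quiet 60s after this */
		resume = now + 60;

	printf("bad entry: %s\n", what);
}

int main(void)
{
	int i;

	/* First 60 calls are reported; the rest are counted as suppressed. */
	for (i = 0; i < 1000; i++)
		report_bad_entry("pte");
	return 0;
}
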
@@ -441,21 +491,18 @@ static inline int is_cow_mapping(unsigned int flags)
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                                pte_t pte)
 {
-       unsigned long pfn;
+       unsigned long pfn = pte_pfn(pte);
 
        if (HAVE_PTE_SPECIAL) {
-               if (likely(!pte_special(pte))) {
-                       VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-                       return pte_page(pte);
-               }
-               VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+               if (likely(!pte_special(pte)))
+                       goto check_pfn;
+               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+                       print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
 
        /* !HAVE_PTE_SPECIAL case follows: */
 
-       pfn = pte_pfn(pte);
-
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
@@ -471,11 +518,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
-       VM_BUG_ON(!pfn_valid(pfn));
+check_pfn:
+       if (unlikely(pfn > highest_memmap_pfn)) {
+               print_bad_pte(vma, addr, pte, NULL);
+               return NULL;
+       }
 
        /*
         * NOTE! We still have PageReserved() pages in the page tables.
-        *
         * eg. VDSO mappings can cause them to exist.
         */
 out:
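
With check_pfn, an entry whose pfn lies beyond highest_memmap_pfn (or a special pte in a vma that should have none) now produces a report via print_bad_pte() and a NULL return instead of a VM_BUG_ON(). Callers already treat NULL as "no struct page behind this pte", as they must for raw VM_PFNMAP entries; schematically, inside a pte-walking loop (not code from this diff):

	struct page *page;

	page = vm_normal_page(vma, addr, ptent);
	if (unlikely(!page))
		continue;	/* raw pfn mapping or bad entry: no struct page to work on */
	/* ... rmap, rss accounting, reference counting on 'page' ... */
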
@@ -772,7 +822,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                        mark_page_accessed(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
+                       if (unlikely(page_mapcount(page) < 0))
+                               print_bad_pte(vma, addr, ptent, page);
                        tlb_remove_page(tlb, page);
                        continue;
                }
@@ -782,8 +834,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 */
                if (unlikely(details))
                        continue;
-               if (!pte_file(ptent))
-                       free_swap_and_cache(pte_to_swp_entry(ptent));
+               if (pte_file(ptent)) {
+                       if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               } else if
+                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
+                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -1154,6 +1210,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        int write = !!(flags & GUP_FLAGS_WRITE);
        int force = !!(flags & GUP_FLAGS_FORCE);
        int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
+       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
        if (len <= 0)
                return 0;
@@ -1232,12 +1289,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        struct page *page;
 
                        /*
-                        * If tsk is ooming, cut off its access to large memory
-                        * allocations. It has a pending SIGKILL, but it can't
-                        * be processed until returning to user space.
+                        * If we have a pending SIGKILL, don't keep faulting
+                        * pages and potentially allocating memory, unless
+                        * current is handling munlock--e.g., on exit. In
+                        * that case, we are not allocating memory.  Rather,
+                        * we're only unlocking already resident/mapped pages.
                         */
-                       if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return i ? i : -ENOMEM;
+                       if (unlikely(!ignore_sigkill &&
+                                       fatal_signal_pending(current)))
+                               return i ? i : -ERESTARTSYS;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
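
The bail-out above follows the usual get_user_pages() return convention: if some pages were already pinned, report how many so the caller can release them, and only return an error code when nothing was done at all. A small stand-alone sketch of that convention, with hypothetical stop_requested() and pin_one() helpers standing in for fatal_signal_pending() and the real work:

#include <errno.h>

/* Hypothetical helpers, not kernel APIs. */
extern int stop_requested(void);
extern int pin_one(int i);

/*
 * Report partial progress (so the caller can undo it); return an error
 * code only when nothing at all was done.
 */
static int pin_items(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (stop_requested())
			return i ? i : -EINTR;
		if (pin_one(i) < 0)
			return i ? i : -EIO;
	}
	return i;
}
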
@@ -1264,9 +1324,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
-                                * page lookups as if they were reads.
+                                * page lookups as if they were reads. But only
+                                * do so when looping for pte_write is futile:
+                                * in some cases userspace may also be wanting
+                                * to write to the gotten user page, which a
+                                * read fault here might prevent (a readonly
+                                * page might get reCOWed by userspace write).
                                 */
-                               if (ret & VM_FAULT_WRITE)
+                               if ((ret & VM_FAULT_WRITE) &&
+                                   !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
 
                                cond_resched();
@@ -1842,10 +1908,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * not dirty accountable.
         */
        if (PageAnon(old_page)) {
-               if (trylock_page(old_page)) {
-                       reuse = can_share_swap_page(old_page);
-                       unlock_page(old_page);
+               if (!trylock_page(old_page)) {
+                       page_cache_get(old_page);
+                       pte_unmap_unlock(page_table, ptl);
+                       lock_page(old_page);
+                       page_table = pte_offset_map_lock(mm, pmd, address,
+                                                        &ptl);
+                       if (!pte_same(*page_table, orig_pte)) {
+                               unlock_page(old_page);
+                               page_cache_release(old_page);
+                               goto unlock;
+                       }
+                       page_cache_release(old_page);
                }
+               reuse = reuse_swap_page(old_page);
+               unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
                /*
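
Because lock_page() can sleep, it cannot be taken under the pte spinlock; the code above therefore grabs a page reference (so the page cannot vanish), drops the pte lock, sleeps on the page lock, re-takes the pte lock, and uses pte_same() to detect whether the entry changed in the window where no lock was held. A rough user-space sketch of the same drop, relock and revalidate pattern, with a generation counter playing the role of pte_same() (hypothetical names, pthreads for illustration only):

#include <pthread.h>
#include <stdbool.h>

/*
 * Hypothetical shared object: 'fast' must not be held while blocking
 * (the pte-lock analogue), 'slow' may block for a long time (the
 * page-lock analogue), 'generation' is bumped whenever the protected
 * entry changes (the pte_same() analogue).
 */
struct guarded {
	pthread_mutex_t fast;
	pthread_mutex_t slow;
	unsigned long generation;
};

/*
 * Called with g->fast held and 'snap' equal to g->generation.
 * Returns true with both locks held and the entry unchanged, false if
 * the entry changed while no lock was held (caller backs out and retries).
 */
static bool lock_slow_and_revalidate(struct guarded *g, unsigned long snap)
{
	if (pthread_mutex_trylock(&g->slow) == 0)
		return true;			/* got it without sleeping */

	pthread_mutex_unlock(&g->fast);		/* cannot block while holding 'fast' */
	pthread_mutex_lock(&g->slow);		/* may sleep */
	pthread_mutex_lock(&g->fast);		/* re-take, then revalidate */

	if (g->generation != snap) {
		pthread_mutex_unlock(&g->slow);	/* like the !pte_same() bail-out */
		return false;
	}
	return true;
}
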
@@ -1974,7 +2051,7 @@ gotten:
                         * mapcount is visible. So transitively, TLBs to
                         * old page will be flushed before it can be reused.
                         */
-                       page_remove_rmap(old_page, vma);
+                       page_remove_rmap(old_page);
                }
 
                /* Free the old page.. */
@@ -2375,7 +2452,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        inc_mm_counter(mm, anon_rss);
        pte = mk_pte(page, vma->vm_page_prot);
-       if (write_access && can_share_swap_page(page)) {
+       if (write_access && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                write_access = 0;
        }
@@ -2386,7 +2463,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        swap_free(entry);
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
-               remove_exclusive_swap_page(page);
+               try_to_free_swap(page);
        unlock_page(page);
 
        if (write_access) {
@@ -2662,12 +2739,11 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                return 0;
 
-       if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
-                       !(vma->vm_flags & VM_CAN_NONLINEAR))) {
+       if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
                /*
                 * Page table corrupted: show pte and kill process.
                 */
-               print_bad_pte(vma, orig_pte, address);
+               print_bad_pte(vma, address, orig_pte, NULL);
                return VM_FAULT_OOM;
        }
 
@@ -2949,7 +3025,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 {
        resource_size_t phys_addr;
        unsigned long prot = 0;
-       void *maddr;
+       void __iomem *maddr;
        int offset = addr & (PAGE_SIZE-1);
 
        if (follow_phys(vma, addr, write, &prot, &phys_addr))
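
Marking maddr as __iomem tells sparse that it is an I/O-space cookie obtained from an ioremap-style call and must only be touched through the I/O accessors, never dereferenced directly. A minimal sketch of that convention with a hypothetical device register (illustrative only, not the body of generic_access_phys()):

/* Hypothetical helper: read a status register at a fixed offset (needs <linux/io.h>). */
static u32 read_device_status(resource_size_t phys_addr)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(phys_addr, PAGE_SIZE);	/* returns an __iomem cookie */
	if (!regs)
		return 0;
	status = readl(regs + 0x10);		/* 0x10: illustrative register offset */
	iounmap(regs);
	return status;
}
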