        refs = 0;
        head = pte_page(pte);
-       page = head + ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
+       page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
        return 1;
 }
 
+static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+{
+       unsigned long mask;
+       pte_t pte = *(pte_t *)&pud;
+       struct page *head, *page;
+       int refs;
+
+       mask = _PAGE_PRESENT|_PAGE_USER;
+       if (write)
+               mask |= _PAGE_RW;
+       if ((pte_val(pte) & mask) != mask)
+               return 0;
+       /* hugepages are never "special" */
+       VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
+       VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+       refs = 0;
+       head = pte_page(pte);
+       page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+       do {
+               VM_BUG_ON(compound_head(page) != head);
+               pages[*nr] = page;
+               (*nr)++;
+               page++;
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+       get_head_page_multiple(head, refs);
+
+       return 1;
+}
+
 static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                        int write, struct page **pages, int *nr)
 {
                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
-               if (!gup_pmd_range(pud, addr, next, write, pages, nr))
-                       return 0;
+               if (unlikely(pud_large(pud))) {
+                       if (!gup_huge_pud(pud, addr, next, write, pages, nr))
+                               return 0;
+               } else {
+                       if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+                               return 0;
+               }
        } while (pudp++, addr = next, addr != end);
 
        return 1;