static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir, unsigned long align_mask)
 {
-       unsigned long npages = iommu_nr_pages(phys_mem, size);
+       unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;
 
                return;
 
        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
-       npages = iommu_nr_pages(dma_addr, size);
+       npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
 
                addr = phys_addr;
-               pages = iommu_nr_pages(s->offset, s->length);
+               pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
 
                seg_size += s->length;
                need = nextneed;
-               pages += iommu_nr_pages(s->offset, s->length);
+               pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
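
For reference, the generic helper these call sites switch to takes the IO page
size as an explicit argument, so the same code works for IOMMUs whose IO page
size differs from the CPU's PAGE_SIZE; the GART driver simply passes PAGE_SIZE.
Below is a minimal standalone sketch, modeled on the lib/iommu-helper.c
implementation of the time; the main() demo and its constants are illustrative,
not part of the patch.

#include <stdio.h>

/*
 * Number of IO pages spanned by a buffer of 'len' bytes starting at 'addr',
 * counting the offset into the first IO page. 'io_page_size' must be a
 * power of two.
 */
static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
				     unsigned long io_page_size)
{
	/* bytes from the start of the first IO page to the end of the buffer */
	unsigned long size = (addr & (io_page_size - 1)) + len;

	/* round up to whole IO pages (DIV_ROUND_UP in the kernel) */
	return (size + io_page_size - 1) / io_page_size;
}

int main(void)
{
	/* 0x100 bytes into a 4 KiB page, 0x1000 bytes long -> spans 2 pages */
	printf("%lu\n", iommu_num_pages(0x1100, 0x1000, 4096));
	return 0;
}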