return 0;
 }
 
-static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
-               const char *prop)
-{
-       const struct address_prop {
-               unsigned long address;
-               unsigned int len;
-       } __attribute__((packed)) *p;
-       int proplen;
-
-       unsigned long start_pfn, nr_pages;
-       struct pglist_data *pgdata;
-       struct zone *zone;
-       int ret;
-
-       p = get_property(spe, prop, &proplen);
-       WARN_ON(proplen != sizeof (*p));
-
-       start_pfn = p->address >> PAGE_SHIFT;
-       nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       pgdata = NODE_DATA(spu->node);
-       zone = pgdata->node_zones;
-
-       ret = __add_pages(zone, start_pfn, nr_pages);
-
-       return ret;
-}
-
-static void __iomem * __init map_spe_prop(struct spu *spu,
-               struct device_node *n, const char *name)
-{
-       const struct address_prop {
-               unsigned long address;
-               unsigned int len;
-       } __attribute__((packed)) *prop;
-
-       const void *p;
-       int proplen;
-       void __iomem *ret = NULL;
-       int err = 0;
-
-       p = get_property(n, name, &proplen);
-       if (proplen != sizeof (struct address_prop))
-               return NULL;
-
-       prop = p;
-
-       err = cell_spuprop_present(spu, n, name);
-       if (err && (err != -EEXIST))
-               goto out;
-
-       ret = ioremap(prop->address, prop->len);
-
- out:
-       return ret;
-}
-
 static void spu_unmap(struct spu *spu)
 {
        if (!firmware_has_feature(FW_FEATURE_LPAR))
        return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
 }
 
+static void __iomem * __init spu_map_prop_old(struct spu *spu,
+                                             struct device_node *n,
+                                             const char *name)
+{
+       const struct address_prop {
+               unsigned long address;
+               unsigned int len;
+       } __attribute__((packed)) *prop;
+       int proplen;
+
+       prop = get_property(n, name, &proplen);
+       if (prop == NULL || proplen != sizeof (struct address_prop))
+               return NULL;
+
+       return ioremap(prop->address, prop->len);
+}
+
 static int __init spu_map_device_old(struct spu *spu)
 {
        struct device_node *node = spu->devnode;
 
        /* we use local store as ram, not io memory */
        spu->local_store = (void __force *)
-               map_spe_prop(spu, node, "local-store");
+               spu_map_prop_old(spu, node, "local-store");
        if (!spu->local_store)
                goto out;
 
                goto out_unmap;
        spu->problem_phys = *(unsigned long *)prop;
 
-       spu->problem = map_spe_prop(spu, node, "problem");
+       spu->problem = spu_map_prop_old(spu, node, "problem");
        if (!spu->problem)
                goto out_unmap;
 
-       spu->priv2 = map_spe_prop(spu, node, "priv2");
+       spu->priv2 = spu_map_prop_old(spu, node, "priv2");
        if (!spu->priv2)
                goto out_unmap;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               spu->priv1 = map_spe_prop(spu, node, "priv1");
+               spu->priv1 = spu_map_prop_old(spu, node, "priv1");
                if (!spu->priv1)
                        goto out_unmap;
        }
                            void __iomem** virt, unsigned long *phys)
 {
        struct device_node *np = spu->devnode;
-       unsigned long start_pfn, nr_pages;
-       struct pglist_data *pgdata;
-       struct zone *zone;
        struct resource resource = { };
        unsigned long len;
        int ret;
 
        ret = of_address_to_resource(np, nr, &resource);
        if (ret)
-               goto out;
-
+               return ret;
        if (phys)
                *phys = resource.start;
        len = resource.end - resource.start + 1;
        *virt = ioremap(resource.start, len);
        if (!*virt)
-               ret = -EINVAL;
-
-       start_pfn = resource.start >> PAGE_SHIFT;
-       nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       pgdata = NODE_DATA(spu->node);
-       zone = pgdata->node_zones;
-
-       ret = __add_pages(zone, start_pfn, nr_pages);
-
-out:
-       return ret;
+               return -EINVAL;
+       return 0;
 }
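
For orientation, a hedged sketch of how a caller would use the resource-mapping helper above, now that it is a plain of_address_to_resource() + ioremap() with the __add_pages() registration dropped. The helper's name is not visible in the quoted context (its first signature line falls outside the hunk); upstream it is likely spu_map_resource(), and the index-to-area assignments below are assumptions, not part of the patch.

/* Sketch only: assumes the helper above is spu_map_resource() and that
 * "reg" indices 0..3 are local-store, problem, priv2, priv1. */
static int __init spu_map_device_sketch(struct spu *spu)
{
	int ret;

	ret = spu_map_resource(spu, 0, (void __iomem **)&spu->local_store,
			       &spu->local_store_phys);
	if (ret)
		return ret;
	ret = spu_map_resource(spu, 1, (void __iomem **)&spu->problem,
			       &spu->problem_phys);
	if (ret)
		return ret;
	ret = spu_map_resource(spu, 2, (void __iomem **)&spu->priv2, NULL);
	if (ret)
		return ret;
	/* priv1 is only mapped when running bare-metal, not under an LPAR */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(spu, 3, (void __iomem **)&spu->priv1,
				       NULL);
	return ret;
}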
 
 static int __init spu_map_device(struct spu *spu)
 
        return ret;
 }
 
-static struct page *
-spufs_mem_mmap_nopage(struct vm_area_struct *vma,
-                     unsigned long address, int *type)
+static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
+                                         unsigned long address)
 {
-       struct page *page = NOPAGE_SIGBUS;
-
        struct spu_context *ctx = vma->vm_file->private_data;
-       unsigned long offset = address - vma->vm_start;
+       unsigned long pfn, offset = address - vma->vm_start;
+
        offset += vma->vm_pgoff << PAGE_SHIFT;
 
        spu_acquire(ctx);
        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                                        & ~_PAGE_NO_CACHE);
-               page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
+               pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                                       | _PAGE_NO_CACHE);
-               page = pfn_to_page((ctx->spu->local_store_phys + offset)
-                                  >> PAGE_SHIFT);
+                                            | _PAGE_NO_CACHE);
+               pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
-       spu_release(ctx);
+       vm_insert_pfn(vma, address, pfn);
 
-       if (type)
-               *type = VM_FAULT_MINOR;
+       spu_release(ctx);
 
-       page_cache_get(page);
-       return page;
+       return NOPFN_REFAULT;
 }
 
+
 static struct vm_operations_struct spufs_mem_mmap_vmops = {
-       .nopage = spufs_mem_mmap_nopage,
+       .nopfn = spufs_mem_mmap_nopfn,
 };
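
The conversion above is the core of the patch: the old .nopage handler had to return a struct page and take a reference on it with page_cache_get(), which is why the SPE local store and problem-state ranges previously had to be added to the memmap with __add_pages(). The .nopfn handler of this kernel generation instead installs the PTE itself with vm_insert_pfn() on a VM_IO | VM_PFNMAP mapping and returns NOPFN_REFAULT so the faulting access is simply retried. A minimal sketch of that pattern follows; the device structure and all names are hypothetical, not taken from the patch.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical device with one physically contiguous mappable window. */
struct exdev {
	unsigned long phys;	/* physical base of the window */
	unsigned long len;	/* window size in bytes */
};

static unsigned long exdev_mmap_nopfn(struct vm_area_struct *vma,
				      unsigned long address)
{
	struct exdev *dev = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= dev->len)
		return NOPFN_SIGBUS;	/* fault outside the window */

	/* install the PTE from a raw pfn; no struct page is involved */
	vm_insert_pfn(vma, address, (dev->phys + offset) >> PAGE_SHIFT);
	return NOPFN_REFAULT;		/* PTE is in place, retry the access */
}

static struct vm_operations_struct exdev_mmap_vmops = {
	.nopfn	= exdev_mmap_nopfn,
};

static int exdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* vm_insert_pfn() requires a PFN-mapped VMA */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_ops = &exdev_mmap_vmops;
	return 0;
}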
 
 static int
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);
 
        .mmap    = spufs_mem_mmap,
 };
 
-static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
+static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
                                    unsigned long address,
-                                   int *type, unsigned long ps_offs,
+                                   unsigned long ps_offs,
                                    unsigned long ps_size)
 {
-       struct page *page = NOPAGE_SIGBUS;
-       int fault_type = VM_FAULT_SIGBUS;
        struct spu_context *ctx = vma->vm_file->private_data;
-       unsigned long offset = address - vma->vm_start;
-       unsigned long area;
+       unsigned long area, offset = address - vma->vm_start;
        int ret;
 
        offset += vma->vm_pgoff << PAGE_SHIFT;
        if (offset >= ps_size)
-               goto out;
+               return NOPFN_SIGBUS;
 
+       /* error here usually means a signal.. we might want to test
+        * the error code more precisely though
+        */
        ret = spu_acquire_runnable(ctx);
        if (ret)
-               goto out;
+               return NOPFN_REFAULT;
 
        area = ctx->spu->problem_phys + ps_offs;
-       page = pfn_to_page((area + offset) >> PAGE_SHIFT);
-       fault_type = VM_FAULT_MINOR;
-       page_cache_get(page);
-
+       vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
        spu_release(ctx);
 
-      out:
-       if (type)
-               *type = fault_type;
-
-       return page;
+       return NOPFN_REFAULT;
 }
 
 #if SPUFS_MMAP_4K
-static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
-                                          unsigned long address, int *type)
+static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
+                                          unsigned long address)
 {
-       return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
+       return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
 }
 
 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
-       .nopage = spufs_cntl_mmap_nopage,
+       .nopfn = spufs_cntl_mmap_nopfn,
 };
 
 /*
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
        return 4;
 }
 
-static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
-                                             unsigned long address, int *type)
+static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
+                                             unsigned long address)
 {
 #if PAGE_SIZE == 0x1000
-       return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
+       return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
 #elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
-       return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+       return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
 #else
 #error unsupported page size
 #endif
 }
 
 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
-       .nopage = spufs_signal1_mmap_nopage,
+       .nopfn = spufs_signal1_mmap_nopfn,
 };
 
 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
 }
 
 #if SPUFS_MMAP_4K
-static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
-                                             unsigned long address, int *type)
+static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
+                                             unsigned long address)
 {
 #if PAGE_SIZE == 0x1000
-       return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
+       return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
 #elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
-       return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+       return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
 #else
 #error unsupported page size
 #endif
 }
 
 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
-       .nopage = spufs_signal2_mmap_nopage,
+       .nopfn = spufs_signal2_mmap_nopfn,
 };
 
 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
                                        spufs_signal2_type_set, "%llu");
 
 #if SPUFS_MMAP_4K
-static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
-                                          unsigned long address, int *type)
+static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
+                                         unsigned long address)
 {
-       return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
+       return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
 }
 
 static struct vm_operations_struct spufs_mss_mmap_vmops = {
-       .nopage = spufs_mss_mmap_nopage,
+       .nopfn = spufs_mss_mmap_nopfn,
 };
 
 /*
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
        .mmap    = spufs_mss_mmap,
 };
 
-static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
-                                          unsigned long address, int *type)
+static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
+                                           unsigned long address)
 {
-       return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
+       return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
 }
 
 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
-       .nopage = spufs_psmap_mmap_nopage,
+       .nopfn = spufs_psmap_mmap_nopfn,
 };
 
 /*
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
 
 
 #if SPUFS_MMAP_4K
-static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
-                                          unsigned long address, int *type)
+static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
+                                         unsigned long address)
 {
-       return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
+       return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
 }
 
 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
-       .nopage = spufs_mfc_mmap_nopage,
+       .nopfn = spufs_mfc_mmap_nopfn,
 };
 
 /*
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);