        /*
         * Mark this as IO
         */
-       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        if (remap_pfn_range(vma, vma->vm_start, phys,
 
        /* Leave vm_pgoff as-is, the PCI space address is the physical
         * address on this platform.
         */
-       vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
-
        prot = pgprot_val(vma->vm_page_prot);
        vma->vm_page_prot = __pgprot(prot);
 
 
        /* Leave vm_pgoff as-is, the PCI space address is the physical
         * address on this platform.
         */
-       vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
-
        prot = pgprot_val(vma->vm_page_prot);
        if (boot_cpu_data.x86 > 3)
                prot |= _PAGE_PCD | _PAGE_PWT;
 
         * Leave vm_pgoff as-is, the PCI space address is the physical
         * address on this platform.
         */
-       vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
-
        if (write_combine && efi_range_is_wc(vma->vm_start,
                                             vma->vm_end - vma->vm_start))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
        vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
        vma->vm_page_prot = prot;
-       vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
 
        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
 
                return -EINVAL;
 
        vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
                                                  vma->vm_page_prot,
                                                  mmap_state, write_combine);
 
                return -EINVAL;
 
        vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
                                                  vma->vm_page_prot,
                                                  mmap_state, write_combine);
 
 {
        struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
 
-       vma->vm_flags |= VM_SHM | VM_LOCKED;
-
        if ((vma->vm_end - vma->vm_start) > dp->size)
                return -EINVAL;
 
 
                return -EINVAL;
 
        vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
                                                  vma->vm_page_prot,
                                                  mmap_state, write_combine);
 
        return -EINVAL;
 }
 
-/*
- * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
- * mapping.
- */
-static __inline__ void
-__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
-                    enum pci_mmap_state mmap_state)
-{
-       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
-}
-
 /*
  * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
  * device mapping.

        if (ret < 0)
                return ret;
 
-       __pci_mmap_set_flags(dev, vma, mmap_state);
        __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
 
        ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 
        if (PAGE_SIZE > (1 << 16))
                return -ENOSYS;
 
-       vma->vm_flags |= (VM_IO | VM_SHM | VM_LOCKED );
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        mmtimer_addr = __pa(RTC_COUNTER_ADDR);
 
        if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
                size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
 
-       vma->vm_flags |= (VM_SHM | VM_LOCKED);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
 
                map_size = sizeof(struct vfc_regs);
 
        vma->vm_flags |=
-               (VM_SHM | VM_LOCKED | VM_IO | VM_MAYREAD | VM_MAYWRITE | VM_MAYSHARE);
+               (VM_MAYREAD | VM_MAYWRITE | VM_MAYSHARE);
        map_offset = (unsigned int) (long)dev->phys_regs;
        ret = io_remap_pfn_range(vma, vma->vm_start,
                                  MK_IOSPACE_PFN(dev->which_io,
 
 
        size = vma->vm_end - vma->vm_start;
 
-       /* To stop the swapper from even considering these pages. */
-       vma->vm_flags |= (VM_SHM | VM_LOCKED);
-
        /* Each page, see which map applies */
        for (page = 0; page < size; ) {
                map_size = 0;
 
 
 #define VM_GROWSDOWN   0x00000100      /* general info on the segment */
 #define VM_GROWSUP     0x00000200
-#define VM_SHM         0x00000000      /* Means nothing: delete it later */
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
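
For reference, a minimal sketch (not part of the patch; foo_mmap and foo_phys_base
are hypothetical names) of what one of these mmap handlers looks like once the
assignments above are gone. VM_SHM is defined as 0, so setting it was always a
no-op, and in kernels of this vintage remap_pfn_range() itself tags the VMA with
VM_IO | VM_RESERVED | VM_PFNMAP, so the handler only needs to set up vm_page_prot
before remapping:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical physical base address of the device's register window. */
static unsigned long foo_phys_base;

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* Uncached access to device registers; no vm_flags fiddling needed. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* remap_pfn_range() marks the VMA VM_IO etc. on our behalf. */
        if (remap_pfn_range(vma, vma->vm_start,
                            foo_phys_base >> PAGE_SHIFT,
                            size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}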