diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 20070b7c573d0087416213cec1fd16bf6ba825d7..e83623ead4414efaf02a8d7e124f5719b18479c4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
  */
 static inline int uncached_access(struct file *file, unsigned long addr)
 {
-#if defined(__i386__) && !defined(__arch_um__)
-       /*
-        * On the PPro and successors, the MTRRs are used to set
-        * memory types for physical addresses outside main memory,
-        * so blindly setting PCD or PWT on those pages is wrong.
-        * For Pentiums and earlier, the surround logic should disable
-        * caching for the high addresses through the KEN pin, but
-        * we maintain the tradition of paranoia in this code.
-        */
-       if (file->f_flags & O_SYNC)
-               return 1;
-       return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-                 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-                 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-                 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
-         && addr >= __pa(high_memory);
-#elif defined(__x86_64__) && !defined(__arch_um__)
-       /* 
-        * This is broken because it can generate memory type aliases,
-        * which can cause cache corruptions
-        * But it is only available for root and we have to be bug-to-bug
-        * compatible with i386.
-        */
-       if (file->f_flags & O_SYNC)
-               return 1;
-       /* same behaviour as i386. PAT always set to cached and MTRRs control the
-          caching behaviour. 
-          Hopefully a full PAT implementation will fix that soon. */      
-       return 0;
-#elif defined(CONFIG_IA64)
+#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
         */
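
With the i386 and x86_64 special cases deleted, x86 falls through to the generic #else branch at the bottom of uncached_access(), which the hunk's context does not show. Reconstructed (hedged) from the surrounding file, the function on such architectures reduces to:

static inline int uncached_access(struct file *file, unsigned long addr)
{
	/* O_SYNC is an explicit request for uncached access */
	if (file->f_flags & O_SYNC)
		return 1;
	/* anything beyond what the kernel maps as RAM is accessed uncached */
	return addr >= __pa(high_memory);
}
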
@@ -108,6 +79,36 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 }
 #endif
 
+#ifdef CONFIG_NONPROMISC_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+       u64 from = ((u64)pfn) << PAGE_SHIFT;
+       u64 to = from + size;
+       u64 cursor = from;
+
+       while (cursor < to) {
+               if (!devmem_is_allowed(pfn)) {
+                       printk(KERN_INFO
+               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+                               current->comm, from, to);
+                       return 0;
+               }
+               cursor += PAGE_SIZE;
+               pfn++;
+       }
+       return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+       return 1;
+}
+#endif
+
+void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+}
+
 /*
  * This function reads the *physical* memory. The f_pos points directly
  * to the memory location.
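
Note how range_is_allowed() walks the request a page at a time and delegates the real policy to the architecture's devmem_is_allowed(). A sketch of such a policy, modeled on the x86 implementation of this era (not part of this diff): keep the legacy low megabyte and all non-RAM (MMIO) pages accessible, refuse kernel RAM.

int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)		/* low 1MB: BIOS data, VGA, option ROMs */
		return 1;
	if (!page_is_ram(pagenr))	/* MMIO and other non-RAM stays open */
		return 1;
	return 0;			/* system RAM is off-limits */
}
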
@@ -150,15 +151,25 @@ static ssize_t read_mem(struct file * file, char __user * buf,
 
                sz = min_t(unsigned long, sz, count);
 
+               if (!range_is_allowed(p >> PAGE_SHIFT, count))
+                       return -EPERM;
+
                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
+               if (!ptr)
+                       return -EFAULT;
 
-               if (copy_to_user(buf, ptr, sz))
+               if (copy_to_user(buf, ptr, sz)) {
+                       unxlate_dev_mem_ptr(p, ptr);
                        return -EFAULT;
+               }
+
+               unxlate_dev_mem_ptr(p, ptr);
+
                buf += sz;
                p += sz;
                count -= sz;
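
From userspace, the read-side hunk adds two failure modes: -EPERM when range_is_allowed() rejects the range, and -EFAULT when xlate_dev_mem_ptr() cannot provide a kernel mapping. A hypothetical probe (not from the patch; the 1MB offset is only a guess at where system RAM starts on a PC) demonstrates the -EPERM case on a kernel built with CONFIG_NONPROMISC_DEVMEM:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* the low 1MB is still readable; RAM above it should now fail */
	if (pread(fd, buf, sizeof(buf), 0x100000) < 0)
		fprintf(stderr, "read at 1MB: %s\n", strerror(errno));
	close(fd);
	return 0;
}
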
@@ -207,20 +218,32 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 
                sz = min_t(unsigned long, sz, count);
 
+               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+                       return -EPERM;
+
                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
+               if (!ptr) {
+                       if (written)
+                               break;
+                       return -EFAULT;
+               }
 
                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
+                       unxlate_dev_mem_ptr(p, ptr);
                        if (written)
                                break;
                        return -EFAULT;
                }
+
+               unxlate_dev_mem_ptr(p, ptr);
+
                buf += sz;
                p += sz;
                count -= sz;
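
The write side must preserve the short-write convention: once any bytes have been transferred, a later xlate_dev_mem_ptr() or copy_from_user() failure reports the partial count, and only a failure on the very first chunk becomes -EFAULT; the hunk threads unxlate_dev_mem_ptr() through every exit without disturbing that. A small userspace analog of the convention (illustrative only; the "fault" is simulated):

#include <errno.h>
#include <stdio.h>

/* mimics write_mem()'s exits: fail_at picks which chunk faults */
static long copy_chunks(long count, long chunk, int fail_at)
{
	long written = 0;
	int i = 0;

	while (count > 0) {
		long sz = count < chunk ? count : chunk;

		if (i++ == fail_at) {
			if (written)
				break;		/* partial progress wins */
			return -EFAULT;		/* nothing copied yet */
		}
		written += sz;
		count -= sz;
	}
	return written;
}

int main(void)
{
	printf("fault on chunk 0: %ld\n", copy_chunks(16, 4, 0)); /* -14 */
	printf("fault on chunk 2: %ld\n", copy_chunks(16, 4, 2)); /*   8 */
	return 0;
}
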
@@ -231,6 +254,12 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
        return written;
 }
 
+int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
+       unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+{
+       return 1;
+}
+
 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
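
Because phys_mem_access_prot_allowed() is weak and defaults to 1, generic kernels are unaffected, while an architecture can veto a mapping or rewrite *vma_prot before mmap_mem() commits to it; a veto surfaces as -EINVAL in the mmap hunk below. A hedged sketch of the kind of override this enables (x86's PAT code later used the hook to refuse cache-attribute aliases; is_mapping_conflict() is a made-up helper):

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
	unsigned long size, pgprot_t *vma_prot)
{
	/* refuse if the requested cache attribute would alias an
	 * existing mapping of the same physical pages */
	if (is_mapping_conflict(pfn, size, pgprot_val(*vma_prot)))
		return 0;	/* mmap_mem() turns this into -EINVAL */
	return 1;
}
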
@@ -271,6 +300,35 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
 }
 #endif
 
+void __attribute__((weak))
+map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
+{
+       /* nothing. architectures can override. */
+}
+
+void __attribute__((weak))
+unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
+{
+       /* nothing. architectures can override. */
+}
+
+static void mmap_mem_open(struct vm_area_struct *vma)
+{
+       map_devmem(vma->vm_pgoff,  vma->vm_end - vma->vm_start,
+                       vma->vm_page_prot);
+}
+
+static void mmap_mem_close(struct vm_area_struct *vma)
+{
+       unmap_devmem(vma->vm_pgoff,  vma->vm_end - vma->vm_start,
+                       vma->vm_page_prot);
+}
+
+static struct vm_operations_struct mmap_mem_ops = {
+       .open  = mmap_mem_open,
+       .close = mmap_mem_close
+};
+
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
        size_t size = vma->vm_end - vma->vm_start;
@@ -281,17 +339,28 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
        if (!private_mapping_ok(vma))
                return -ENOSYS;
 
+       if (!range_is_allowed(vma->vm_pgoff, size))
+               return -EPERM;
+
+       if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
+                                               &vma->vm_page_prot))
+               return -EINVAL;
+
        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);
 
+       vma->vm_ops = &mmap_mem_ops;
+
        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
-                           vma->vm_page_prot))
+                           vma->vm_page_prot)) {
+               unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
                return -EAGAIN;
+       }
        return 0;
 }
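
The open/close vm_ops ensure the architecture hears about every VMA that ends up covering /dev/mem (fork, VMA splits, munmap), not just the initial mmap(), and the new error path balances the bookkeeping when remap_pfn_range() fails. A hedged sketch of the x86 PAT pairing these weak hooks were introduced for, treating the reserve_memtype()/free_memtype() signatures of the era as approximate:

void map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	unsigned long want = pgprot_val(prot) & _PAGE_CACHE_MASK;
	unsigned long got;
	u64 addr = (u64)pfn << PAGE_SHIFT;

	/* take a memory-type reservation for the new mapping */
	reserve_memtype(addr, addr + len, want, &got);
}

void unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + len);		/* drop the reservation */
}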