X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fchar%2Fmspec.c;h=82f2e27dca7d17ac42072adbd56cb52a175d551a;hb=11d2e28241e89227d88da53187224c84316acc86;hp=c716ef0dd370af5043590d12d314e8981daac754;hpb=e8b495fe09bc793ae26774e7b2667f7f658d56e2;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index c716ef0dd37..82f2e27dca7 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -38,6 +38,7 @@
 #include <linux/miscdevice.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 #include <linux/vmalloc.h>
 #include <linux/string.h>
 #include <linux/slab.h>
@@ -66,7 +67,7 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
 	MSPEC_FETCHOP = 1,
 	MSPEC_CACHED,
 	MSPEC_UNCACHED
@@ -82,15 +83,25 @@ static int is_sn2;
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect in fork case where multiple tasks share the vma_data.
  */
 struct vma_data {
 	atomic_t refcnt;	/* Number of vmas sharing the data. */
-	spinlock_t lock;	/* Serialize access to the vma. */
+	spinlock_t lock;	/* Serialize access to this structure. */
 	int count;		/* Number of pages allocated. */
-	int type;		/* Type of pages allocated. */
+	enum mspec_page_type type; /* Type of pages allocated. */
+	int flags;		/* See VMD_xxx below. */
+	unsigned long vm_start;	/* Original (unsplit) base. */
+	unsigned long vm_end;	/* Original (unsplit) end. */
 	unsigned long maddr[0];	/* Array of MSPEC addresses. */
 };

+#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES	4
@@ -128,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.). Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.). Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
@@ -144,43 +155,43 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
 	struct vma_data *vdata;
-	int i, pages, result, vdata_size;
+	int index, last_index;
+	unsigned long my_page;

 	vdata = vma->vm_private_data;
+
 	if (!atomic_dec_and_test(&vdata->refcnt))
 		return;

-	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-	for (i = 0; i < pages; i++) {
-		if (vdata->maddr[i] == 0)
+	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (index = 0; index < last_index; index++) {
+		if (vdata->maddr[index] == 0)
 			continue;
 		/*
 		 * Clear the page before sticking it back
 		 * into the pool.
 		 */
-		result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
-		if (!result)
-			uncached_free_page(vdata->maddr[i]);
+		my_page = vdata->maddr[index];
+		vdata->maddr[index] = 0;
+		if (!mspec_zero_block(my_page, PAGE_SIZE))
+			uncached_free_page(my_page);
 		else
 			printk(KERN_WARNING "mspec_close(): "
-			       "failed to zero page %i\n",
-			       result);
+			       "failed to zero page %ld\n", my_page);
 	}

-	if (vdata_size <= PAGE_SIZE)
-		kfree(vdata);
-	else
+	if (vdata->flags & VMD_VMALLOCED)
 		vfree(vdata);
+	else
+		kfree(vdata);
 }

-
 /*
  * mspec_nopfn
  *
@@ -194,7 +205,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
 	int index;
 	struct vma_data *vdata = vma->vm_private_data;

-	index = (address - vma->vm_start) >> PAGE_SHIFT;
+	BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+	index = (address - vdata->vm_start) >> PAGE_SHIFT;
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
 		maddr = uncached_alloc_page(numa_node_id());
@@ -236,10 +248,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+					enum mspec_page_type type)
 {
 	struct vma_data *vdata;
-	int pages, vdata_size;
+	int pages, vdata_size, flags = 0;

 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
@@ -254,12 +267,17 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
 	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
 	if (vdata_size <= PAGE_SIZE)
 		vdata = kmalloc(vdata_size, GFP_KERNEL);
-	else
+	else {
 		vdata = vmalloc(vdata_size);
+		flags = VMD_VMALLOCED;
+	}
 	if (!vdata)
 		return -ENOMEM;
 	memset(vdata, 0, vdata_size);

+	vdata->vm_start = vma->vm_start;
+	vdata->vm_end = vma->vm_end;
+	vdata->flags = flags;
 	vdata->type = type;
 	spin_lock_init(&vdata->lock);
 	vdata->refcnt = ATOMIC_INIT(1);
@@ -344,7 +362,7 @@ mspec_init(void)
 		is_sn2 = 1;
 	if (is_shub2()) {
 		ret = -ENOMEM;
-		for_each_online_node(nid) {
+		for_each_node_state(nid, N_ONLINE) {
 			int actual_nid;
 			int nasid;
 			unsigned long phys;
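
The heart of this change is a lifetime rule: after split_vma() (for example, a partial munmap), several vmas share one vma_data, so the vma passed to the final mspec_close() may cover only part of the original mapping. The patch therefore saves the original vm_start/vm_end in vma_data, walks that saved range when freeing, and makes refcnt atomic because fork can take a reference while the closer does not hold mm->mmap_sem. The userspace sketch below models only that rule; every name in it (region_data, view, and so on) is invented for illustration and is not mspec or kernel API.

/*
 * refcnt_sketch.c - minimal userspace model of the lifetime rule above.
 * NOT kernel code: region_data stands in for struct vma_data, view for a
 * (possibly split) vma.  Build with: cc -std=c11 refcnt_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

struct region_data {			/* plays the role of struct vma_data */
	atomic_int refcnt;		/* number of views sharing this data */
	unsigned long vm_start;		/* original (unsplit) base */
	unsigned long vm_end;		/* original (unsplit) end */
	unsigned long maddr[];		/* one slot per page of the original range */
};

struct view {				/* plays the role of one vma */
	unsigned long start, end;
	struct region_data *rd;
};

/* mmap time: size the descriptor for the whole, unsplit mapping. */
static struct region_data *region_create(unsigned long start, unsigned long end)
{
	unsigned long pages = (end - start) >> PAGE_SHIFT;
	struct region_data *rd =
		calloc(1, sizeof(*rd) + pages * sizeof(rd->maddr[0]));

	atomic_init(&rd->refcnt, 1);
	rd->vm_start = start;
	rd->vm_end = end;
	return rd;
}

/* split/fork time (mirrors mspec_open): the new view shares the same data. */
static void view_open(struct view *v)
{
	atomic_fetch_add(&v->rd->refcnt, 1);
}

/*
 * unmap time (mirrors mspec_close): only the last closer frees, and it
 * walks rd->vm_start..rd->vm_end rather than the view being closed;
 * otherwise pages outside the final view would leak.
 */
static void view_close(struct view *v)
{
	struct region_data *rd = v->rd;
	unsigned long index, last_index;

	if (atomic_fetch_sub(&rd->refcnt, 1) != 1)
		return;			/* other views still hold references */

	last_index = (rd->vm_end - rd->vm_start) >> PAGE_SHIFT;
	for (index = 0; index < last_index; index++)
		if (rd->maddr[index])
			printf("freeing page slot %lu\n", index);
	free(rd);
}

int main(void)
{
	/* One 4-page mapping, later split into two 2-page views. */
	struct region_data *rd = region_create(0x10000, 0x14000);
	struct view lo = { 0x10000, 0x12000, rd };
	struct view hi = { 0x12000, 0x14000, rd };

	rd->maddr[0] = rd->maddr[3] = 1;	/* pretend two pages were faulted in */
	view_open(&hi);				/* the split takes an extra reference */

	view_close(&lo);			/* refcnt 2 -> 1: nothing freed yet */
	view_close(&hi);			/* last close: frees slots 0 and 3 */
	return 0;
}

The BUG_ON added to mspec_nopfn() follows from the same bookkeeping: a faulting address must fall inside the saved original range, because maddr[] is sized for the unsplit mapping.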