/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

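/*
 * How deferred IO works (a summary of the code below): the driver's
 * framebuffer lives in vmalloc-ed memory.  Accesses through an mmap of
 * the device fault into fb_deferred_io_fault(), which hands the backing
 * page to the VM.  The first write to a page additionally goes through
 * fb_deferred_io_mkwrite(), which records the page on a sorted list and
 * schedules delayed work.  When the delay expires, the work function
 * write-protects the touched pages again and passes the list to the
 * driver's deferred_io() callback, which pushes the dirty pages out to
 * the device.
 */
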
/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vma->vm_private_data;
        /* info->screen_base is virtual memory */
        void *screen_base = (void __force *) info->screen_base;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = vmalloc_to_page(screen_base + offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        if (vma->vm_file)
                page->mapping = vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff;

        vmf->page = page;
        return 0;
}

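/*
 * fsync on the fbdev node: cancel the pending delayed work and run the
 * deferred IO immediately instead of waiting for the delay to expire.
 */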
int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct fb_info *info = file->private_data;

        /* Skip if deferred io is compiled-in but disabled on this fbdev */
        if (!info->fbdefio)
                return 0;

        /* Kill off the delayed work */
        cancel_rearming_delayed_work(&info->deferred_work);

        /* Run it immediately */
        return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
                                  struct page *page)
{
        struct fb_info *info = vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *cur;

        /* this is a callback we get when userspace first tries to
        write to the page. we schedule delayed work; that work will
        eventually mkclean the touched pages and execute the deferred
        framebuffer IO. then if userspace touches a page again, we
        repeat the same scheme */

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /* we loop through the pagelist before adding in order
        to keep the pagelist sorted */
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                /* this check is to catch the case where a new
                process could start writing to the same page
                through a new pte. this new access can cause the
                mkwrite even when the original process's pte is
                marked writable */
                if (unlikely(cur == page))
                        goto page_already_added;
                else if (cur->index > page->index)
                        break;
        }

        list_add_tail(&page->lru, &cur->lru);

page_already_added:
        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

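/*
 * The fb pages are vmalloc-ed and carry no buffer heads, so the generic
 * set_page_dirty path (which expects buffer heads) is not suitable for
 * them.  Dirty tracking is done via page_mkwrite above; all that is
 * needed here is the dirty bit itself.
 */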
static int fb_deferred_io_set_page_dirty(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
        .set_page_dirty = fb_deferred_io_set_page_dirty,
};

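/*
 * fb_mmap replacement: route faults and first writes through our vm_ops.
 * VM_IO marks the mapping as IO space (e.g. skipped in core dumps),
 * VM_RESERVED keeps the VM from swapping these pages out, and
 * VM_DONTEXPAND forbids mremap() from growing the mapping.
 */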
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_DONTEXPAND;
        vma->vm_private_data = info;
        return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info,
                                                deferred_work.work);
        struct list_head *node, *next;
        struct page *cur;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /* here we mkclean the pages, then do all deferred IO */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_safe(node, next, &fbdefio->pagelist) {
                list_del(node);
        }
        mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;

        BUG_ON(!fbdefio);
        mutex_init(&fbdefio->lock);
        info->fbops->fb_mmap = fb_deferred_io_mmap;
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
                fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

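/*
 * Example wiring from a driver (a minimal sketch; the "xxxfb" names and
 * the xxxfb_write_page() helper are hypothetical, not part of this API):
 *
 *      static void xxxfb_dpy_deferred_io(struct fb_info *info,
 *                                        struct list_head *pagelist)
 *      {
 *              struct page *cur;
 *
 *              list_for_each_entry(cur, pagelist, lru)
 *                      xxxfb_write_page(info, cur->index);
 *      }
 *
 *      static struct fb_deferred_io xxxfb_defio = {
 *              .delay          = HZ,
 *              .deferred_io    = xxxfb_dpy_deferred_io,
 *      };
 *
 *      info->fbdefio = &xxxfb_defio;
 *      fb_deferred_io_init(info);
 */

/*
 * Called when the device node is opened so that pages dirtied through
 * this file's mapping use our address_space operations.
 */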
void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
        void *screen_base = (void __force *) info->screen_base;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work(&info->deferred_work);
        flush_scheduled_work();

        /* clear out the mapping that we setup */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = vmalloc_to_page(screen_base + i);
                page->mapping = NULL;
        }

        info->fbops->fb_mmap = NULL;
        mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");