/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

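/*
 * Return the page backing byte offset @offs of the framebuffer. The
 * backing memory may be vmalloc-ed or physically contiguous; both
 * cases are handled.
 */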
struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* find and return the page backing the faulting framebuffer address */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

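/*
 * fsync() on the fbdev file forces any pending deferred IO out to the
 * device immediately rather than waiting for the next delay interval.
 */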
int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule the delayed work. that work
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause a
		mkwrite even when the original process's pte is
		marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}
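
/*
 * Lifecycle of a deferred-IO page, for reference: a userspace write to a
 * clean page triggers ->page_mkwrite(), which queues the page (sorted by
 * index) and schedules the delayed work. When the work runs, it
 * write-protects the PTEs again via page_mkclean() and hands the list to
 * the driver, so the next userspace write starts the cycle over.
 */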

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

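/*
 * These pages are driver-owned and not backed by buffer_heads, so give
 * the mapping its own set_page_dirty handler; otherwise the VM core
 * would fall back to __set_page_dirty_buffers().
 */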
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

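/*
 * Installed as the device's fb_mmap handler by fb_deferred_io_init();
 * this wires the defio vm_ops into every mapping of the framebuffer.
 */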
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page, *tmp_page;
	struct list_head *node, *tmp_node;
	struct list_head non_dirty;

	INIT_LIST_HEAD(&non_dirty);

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
		lock_page(page);
		/*
		 * The workqueue callback can be triggered after a
		 * ->page_mkwrite() call but before the PTE has been marked
		 * dirty. In this case page_mkclean() won't "rearm" the page.
		 *
		 * To avoid this, remove those "non-dirty" pages from the
		 * pagelist before calling the driver's callback, then add
		 * them back to get processed on the next work iteration.
		 * At that time, their PTEs will hopefully be dirty for real.
		 */
		if (!page_mkclean(page))
			list_move_tail(&page->lru, &non_dirty);
		unlock_page(page);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list... */
	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
		list_del(node);
	}
	/* ... and add back the "non-dirty" pages to the list */
	list_splice_tail(&non_dirty, &fbdefio->pagelist);
	mutex_unlock(&fbdefio->lock);
}
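
/*
 * Example, illustrative only (not part of the original file): the shape
 * of a driver's deferred_io callback, which receives the sorted pagelist
 * built above. The function name and message are hypothetical; a real
 * driver (e.g. drivers/video/hecubafb.c) would push each page to its
 * device here.
 */
static void example_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *page;

	/* pages arrive sorted by page->index, one entry per touched page */
	list_for_each_entry(page, pagelist, lru)
		printk(KERN_DEBUG "flushing fb page at offset %lu\n",
		       page->index << PAGE_SHIFT);
}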

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
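
/*
 * Example, illustrative only (not part of the original file): how a
 * driver enables deferred IO, modeled on in-tree users such as
 * drivers/video/hecubafb.c. The names below are hypothetical.
 */
static struct fb_deferred_io example_defio = {
	.delay		= HZ,			/* flush at most once a second */
	.deferred_io	= example_deferred_io,	/* callback sketched above */
};

/*
 * The driver's probe routine would then do:
 *	info->fbdefio = &example_defio;
 *	fb_deferred_io_init(info);
 *	register_framebuffer(info);
 * and call fb_deferred_io_cleanup(info) on the teardown path.
 */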
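
/*
 * Called from fbmem.c when an fbdev using deferred IO is opened; hooks
 * the defio address_space_operations into the file's mapping.
 */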
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *node, *tmp_node;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* the list may still have some non-dirty pages at this point */
	mutex_lock(&fbdefio->lock);
	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");