]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/staging/dream/pmem.c
Staging: crystalhd_lnx: remove casts from void*
[net-next-2.6.git] / drivers / staging / dream / pmem.c
CommitLineData
9b843757
PM
1/* drivers/android/pmem.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/miscdevice.h>
17#include <linux/platform_device.h>
18#include <linux/fs.h>
19#include <linux/file.h>
20#include <linux/mm.h>
21#include <linux/list.h>
22#include <linux/debugfs.h>
23#include <linux/android_pmem.h>
24#include <linux/mempolicy.h>
25#include <linux/sched.h>
5a0e3ad6 26#include <linux/slab.h>
ef079a76
NC
27#include <linux/io.h>
28#include <linux/uaccess.h>
9b843757
PM
29#include <asm/cacheflush.h>
30
/* allocator limits: up to 10 pmem misc devices, buddy orders 0..128,
 * minimum allocation granularity of one page */
#define PMEM_MAX_DEVICES 10
#define PMEM_MAX_ORDER 128
#define PMEM_MIN_ALLOC PAGE_SIZE

#define PMEM_DEBUG 1

/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED (0x1 << 1)
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP (0x1 << 3)
#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)

/* per-fd state for an open pmem device file */
struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see flags above for descriptions */
	unsigned int flags;
	/* protects this data field, if the mm_mmap sem will be held at the
	 * same time as this sem, the mm sem must be taken first (as this is
	 * the order for vma_open and vma_close ops */
	struct rw_semaphore sem;
	/* info about the mmaping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
#if PMEM_DEBUG
	/* get_pmem_file/put_pmem_file balance counter */
	int ref;
#endif
};
82
/* one entry per PMEM_MIN_ALLOC page: the buddy allocator's bitmap cell */
struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};

/* a suballocated [offset, len) region hung off pmem_data->region_list */
struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};
92
/* set to 1 for verbose per-call debug logging via DLOG() */
#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt, args...) \
	do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
		    ##args); } \
	while (0)
#else
#define DLOG(x...) do {} while (0)
#endif
102
/* per-device state; one entry in pmem[] per registered pmem region,
 * indexed by the misc device minor (see get_id()) */
struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* index of the garbage page in the pmem space */
	int garbage_index;
	/* the bitmap for the region indicating which entries are allocated
	 * and which are free */
	struct pmem_bits *bitmap;
	/* indicates the region should not be managed with an allocator */
	unsigned no_allocator;
	/* indicates maps of this region should be cached, if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	/* in no_allocator mode the first mapper gets the whole space and sets
	 * this flag */
	unsigned allocated;
	/* for debugging, creates a list of pmem file structs, the
	 * data_list_sem should be taken before pmem_data->sem if both are
	 * needed */
	struct semaphore data_list_sem;
	struct list_head data_list;
	/* pmem_sem protects the bitmap array
	 * a write lock should be held when modifying entries in bitmap
	 * a read lock should be held when reading data from bits or
	 * dereferencing a pointer into bitmap
	 *
	 * pmem_data->sem protects the pmem data of a particular file
	 * Many of the function that require the pmem_data->sem have a non-
	 * locking version for when the caller is already holding that sem.
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => down(bitmap_sem)
	 */
	struct rw_semaphore bitmap_sem;

	/* optional platform-driver hooks chained from the generic ops */
	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
};

static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;
155
/* bitmap accessors: 'index' counts PMEM_MIN_ALLOC pages from the region
 * base; a block at 'index' spans (1 << order) entries */
#define PMEM_IS_FREE(id, index) (!(pmem[id].bitmap[index].allocated))
#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
/* a block's buddy lives at the index with the order bit flipped */
#define PMEM_BUDDY_INDEX(id, index) ((index) ^ (1 << PMEM_ORDER(id, index)))
#define PMEM_NEXT_INDEX(id, index) ((index) + (1 << PMEM_ORDER(id, index)))
#define PMEM_OFFSET(index) ((index) * PMEM_MIN_ALLOC)
#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
	PMEM_LEN(id, index))
/* fix: PMEM_OFFSET takes only 'index'; the old expansion passed
 * (id, index) and broke any use of PMEM_START_VADDR at compile time */
#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_REVOKED(data) ((data)->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) (((data)->flags & PMEM_FLAGS_SUBMAP) && \
	(!((data)->flags & PMEM_FLAGS_UNSUBMAP)))
172
static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);

/* file operations installed on each pmem misc device */
const struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
};
184
/* pmem devices are misc devices; the minor number selects the pmem[] slot */
static int get_id(struct file *file)
{
	return MINOR(file->f_dentry->d_inode->i_rdev);
}
189
190static int is_pmem_file(struct file *file)
191{
192 int id;
193
194 if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
195 return 0;
196 id = get_id(file);
197 if (unlikely(id >= PMEM_MAX_DEVICES))
198 return 0;
199 if (unlikely(file->f_dentry->d_inode->i_rdev !=
200 MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
201 return 0;
202 return 1;
203}
204
205static int has_allocation(struct file *file)
206{
207 struct pmem_data *data;
208 /* check is_pmem_file first if not accessed via pmem_file_ops */
209
210 if (unlikely(!file->private_data))
211 return 0;
212 data = (struct pmem_data *)file->private_data;
213 if (unlikely(data->index < 0))
214 return 0;
215 return 1;
216}
217
/*
 * Returns 1 if 'file' may manipulate submaps: either it is the master
 * mapping itself, or it currently holds the master's fd open (verified
 * by comparing the fd's file struct to the recorded master_file).
 */
static int is_master_owner(struct file *file)
{
	struct file *master_file;
	struct pmem_data *data;
	int put_needed, ret = 0;

	if (!is_pmem_file(file) || !has_allocation(file))
		return 0;
	data = (struct pmem_data *)file->private_data;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
		return 1;
	/* fget_light avoids a full refcount bump for single-threaded files */
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
		ret = 1;
	fput_light(master_file, put_needed);
	return ret;
}
235
/*
 * Return the block at 'index' to the buddy allocator, coalescing with
 * its free buddy repeatedly until no further merge is possible.
 * Caller should hold the write lock on pmem_sem (bitmap_sem)!
 */
static int pmem_free(int id, int index)
{
	int buddy, curr = index;
	DLOG("index %d\n", index);

	if (pmem[id].no_allocator) {
		/* no_allocator mode: the whole region is one allocation */
		pmem[id].allocated = 0;
		return 0;
	}
	/* clean up the bitmap, merging any buddies */
	pmem[id].bitmap[curr].allocated = 0;
	/* find a slots buddy Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
	do {
		buddy = PMEM_BUDDY_INDEX(id, curr);
		if (PMEM_IS_FREE(id, buddy) &&
		    PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
			/* merged block doubles in order and starts at the
			 * lower of the two indices */
			PMEM_ORDER(id, buddy)++;
			PMEM_ORDER(id, curr)++;
			curr = min(buddy, curr);
		} else {
			break;
		}
	} while (curr < pmem[id].num_entries);

	return 0;
}
266
static void pmem_revoke(struct file *file, struct pmem_data *data);

/*
 * Release a pmem fd: revoke connected submaps if this is a master,
 * free the backing allocation, drop the task reference taken at submap
 * time, and tear down the region list. Chains to the platform driver's
 * release hook if one is registered.
 */
static int pmem_release(struct inode *inode, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;


	down(&pmem[id].data_list_sem);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		struct pmem_data *sub_data;
		list_for_each(elt, &pmem[id].data_list) {
			sub_data = list_entry(elt, struct pmem_data, list);
			down_read(&sub_data->sem);
			if (PMEM_IS_SUBMAP(sub_data) &&
			    file == sub_data->master_file) {
				/* drop the sem before revoking: pmem_revoke
				 * takes it for writing itself */
				up_read(&sub_data->sem);
				pmem_revoke(file, sub_data);
			} else
				up_read(&sub_data->sem);
		}
	}
	list_del(&data->list);
	up(&pmem[id].data_list_sem);


	down_write(&data->sem);

	/* if its not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		down_write(&pmem[id].bitmap_sem);
		ret = pmem_free(id, data->index);
		up_write(&pmem[id].bitmap_sem);
	}

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		if (data->task) {
			put_task_struct(data->task);
			data->task = NULL;
		}

	file->private_data = NULL;

	/* free any suballocation bookkeeping left on the region list */
	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		list_del(elt);
		kfree(region_node);
	}
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);
	kfree(data);
	if (pmem[id].release)
		ret = pmem[id].release(inode, file);

	return ret;
}
330
331static int pmem_open(struct inode *inode, struct file *file)
332{
333 struct pmem_data *data;
334 int id = get_id(file);
335 int ret = 0;
336
337 DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
338 /* setup file->private_data to indicate its unmapped */
339 /* you can only open a pmem device one time */
340 if (file->private_data != NULL)
341 return -1;
342 data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
343 if (!data) {
344 printk("pmem: unable to allocate memory for pmem metadata.");
345 return -1;
346 }
347 data->flags = 0;
348 data->index = -1;
349 data->task = NULL;
350 data->vma = NULL;
351 data->pid = 0;
352 data->master_file = NULL;
353#if PMEM_DEBUG
354 data->ref = 0;
355#endif
356 INIT_LIST_HEAD(&data->region_list);
357 init_rwsem(&data->sem);
358
359 file->private_data = data;
360 INIT_LIST_HEAD(&data->list);
361
362 down(&pmem[id].data_list_sem);
363 list_add(&data->list, &pmem[id].data_list);
364 up(&pmem[id].data_list_sem);
365 return ret;
366}
367
368static unsigned long pmem_order(unsigned long len)
369{
370 int i;
371
372 len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
373 len--;
374 for (i = 0; i < sizeof(len)*8; i++)
375 if (len >> i == 0)
376 break;
377 return i;
378}
379
/*
 * Find and claim a free block big enough for 'len' bytes, splitting a
 * larger best-fit block down to the required order. Returns the bitmap
 * index of the allocation (or the byte length in no_allocator mode),
 * -1 on failure. Caller should hold the write lock on pmem_sem!
 */
static int pmem_allocate(int id, unsigned long len)
{
	/* return the corresponding pdata[] entry */
	int curr = 0;
	int end = pmem[id].num_entries;
	int best_fit = -1;
	unsigned long order = pmem_order(len);

	if (pmem[id].no_allocator) {
		DLOG("no allocator");
		/* only one allocation: the whole region or nothing */
		if ((len > pmem[id].size) || pmem[id].allocated)
			return -1;
		pmem[id].allocated = 1;
		/* in no_alloc mode the stored "index" is the byte length */
		return len;
	}

	if (order > PMEM_MAX_ORDER)
		return -1;
	DLOG("order %lx\n", order);

	/* look through the bitmap:
	 * if you find a free slot of the correct order use it
	 * otherwise, use the best fit (smallest with size > order) slot
	 */
	while (curr < end) {
		if (PMEM_IS_FREE(id, curr)) {
			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
				/* set the not free bit and clear others */
				best_fit = curr;
				break;
			}
			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
			    (best_fit < 0 ||
			     PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
				best_fit = curr;
		}
		curr = PMEM_NEXT_INDEX(id, curr);
	}

	/* if best_fit < 0, there are no suitable slots,
	 * return an error
	 */
	if (best_fit < 0) {
		printk("pmem: no space left to allocate!\n");
		return -1;
	}

	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
		int buddy;
		PMEM_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
	}
	pmem[id].bitmap[best_fit].allocated = 1;
	return best_fit;
}
441
442static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
443{
444 int id = get_id(file);
445#ifdef pgprot_noncached
446 if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
447 return pgprot_noncached(vma_prot);
448#endif
449#ifdef pgprot_ext_buffered
450 else if (pmem[id].buffered)
451 return pgprot_ext_buffered(vma_prot);
452#endif
453 return vma_prot;
454}
455
456static unsigned long pmem_start_addr(int id, struct pmem_data *data)
457{
458 if (pmem[id].no_allocator)
459 return PMEM_START_ADDR(id, 0);
460 else
461 return PMEM_START_ADDR(id, data->index);
462
463}
464
/* kernel virtual address of the allocation: vbase + (phys - base) */
static void *pmem_start_vaddr(int id, struct pmem_data *data)
{
	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
}
469
470static unsigned long pmem_len(int id, struct pmem_data *data)
471{
472 if (pmem[id].no_allocator)
473 return data->index;
474 else
475 return PMEM_LEN(id, data->index);
476}
477
/*
 * Map 'len' bytes at 'offset' in the vma to repeated copies of the
 * single garbage page, so a revoked or forked mapping cannot see real
 * pmem data. Returns 0 or -EAGAIN if a pfn insertion fails.
 */
static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
			    unsigned long len)
{
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
		    pmem[id].garbage_pfn))
			return -EAGAIN;
	}
	return 0;
}
492
493static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
494 struct pmem_data *data, unsigned long offset,
495 unsigned long len)
496{
497 int garbage_pages;
498 DLOG("unmap offset %lx len %lx\n", offset, len);
499
500 BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
501
502 garbage_pages = len >> PAGE_SHIFT;
503 zap_page_range(vma, vma->vm_start + offset, len, NULL);
504 pmem_map_garbage(id, vma, data, offset, len);
505 return 0;
506}
507
/*
 * io_remap the physical pmem backing (at 'offset' inside the allocation)
 * into the vma. All addresses and lengths must be page aligned.
 * Returns 0 or -EAGAIN on remap failure.
 */
static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
{
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	if (io_remap_pfn_range(vma, vma->vm_start + offset,
		(pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
		len, vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
525
/*
 * Replace whatever is currently mapped at [offset, offset+len) in the
 * vma with the real pmem pages. Hold the mm sem for the vma you are
 * modifying when you call this.
 */
static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	BUG_ON(!vma);
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
}
535
536static void pmem_vma_open(struct vm_area_struct *vma)
537{
538 struct file *file = vma->vm_file;
539 struct pmem_data *data = file->private_data;
540 int id = get_id(file);
541 /* this should never be called as we don't support copying pmem
542 * ranges via fork */
543 BUG_ON(!has_allocation(file));
544 down_write(&data->sem);
545 /* remap the garbage pages, forkers don't get access to the data */
546 pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end);
547 up_write(&data->sem);
548}
549
/*
 * vm_ops->close: the mapping is going away; forget the cached vma and,
 * for connected submaps, mark the submap released (UNSUBMAP) so the
 * lock/remap paths know the vma is gone.
 */
static void pmem_vma_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

	DLOG("current %u ppid %u file %p count %d\n", current->pid,
	     current->parent->pid, file, file_count(file));
	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
		printk(KERN_WARNING "pmem: something is very wrong, you are "
		       "closing a vm backing an allocation that doesn't "
		       "exist!\n");
		return;
	}
	down_write(&data->sem);
	if (data->vma == vma) {
		data->vma = NULL;
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	}
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
}
573
574static struct vm_operations_struct vm_ops = {
575 .open = pmem_vma_open,
576 .close = pmem_vma_close,
577};
578
/*
 * mmap a pmem fd. Plain fds become master maps of the whole allocation;
 * CONNECTED fds become submaps: garbage-mapped everywhere except regions
 * previously established via PMEM_MAP. Each fd may be mmaped only once,
 * from offset 0, in whole pages.
 */
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pmem_data *data;
	int index;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);

	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
		       " and a multiple of pages_size.\n");
#endif
		return -EINVAL;
	}

	data = (struct pmem_data *)file->private_data;
	down_write(&data->sem);
	/* check this file isn't already mmaped, for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
	    (data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
		       "this file is already mmaped. %x\n", data->flags);
#endif
		ret = -EINVAL;
		goto error;
	}
	/* if file->private_data == unalloced, alloc*/
	if (data && data->index == -1) {
		down_write(&pmem[id].bitmap_sem);
		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
		up_write(&pmem[id].bitmap_sem);
		data->index = index;
	}
	/* either no space was available or an error occured */
	if (!has_allocation(file)) {
		ret = -EINVAL;
		printk("pmem: could not find allocation for map.\n");
		goto error;
	}

	if (pmem_len(id, data) < vma_size) {
#if PMEM_DEBUG
		printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
		       "size of backing region [%lu].\n", vma_size,
		       pmem_len(id, data));
#endif
		ret = -EINVAL;
		goto error;
	}

	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);

	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		/* submap: start fully garbage-mapped, then map in only the
		 * regions already granted via PMEM_MAP */
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			printk("pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			DLOG("remapping file: %p %lx %lx\n", file,
			     region_node->region.offset,
			     region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
						 region_node->region.offset,
						 region_node->region.len)) {
				ret = -EAGAIN;
				goto error;
			}
		}
		/* record the mapping task; pmem_lock_data_and_mm needs its
		 * mm to revoke or remap this vma later */
		data->flags |= PMEM_FLAGS_SUBMAP;
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->vma = vma;
#if PMEM_DEBUG
		data->pid = current->pid;
#endif
		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
		     current->pid);
	} else {
		/* master map: map the whole allocation directly */
		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
			printk(KERN_INFO "pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		data->flags |= PMEM_FLAGS_MASTERMAP;
		data->pid = current->pid;
	}
	vma->vm_ops = &vm_ops;
error:
	up_write(&data->sem);
	return ret;
}
679
/* the following are the api for accessing pmem regions by other drivers
 * from inside the kernel */

/*
 * Report the userspace mapping (start/len) of a pmem fd, or 0/0 if it is
 * not currently mmaped. Returns 0 on success, -1 for non-pmem or
 * unallocated files.
 */
int get_pmem_user_addr(struct file *file, unsigned long *start,
		       unsigned long *len)
{
	struct pmem_data *data;
	if (!is_pmem_file(file) || !has_allocation(file)) {
#if PMEM_DEBUG
		printk(KERN_INFO "pmem: requested pmem data from invalid"
		       "file.\n");
#endif
		return -1;
	}
	data = (struct pmem_data *)file->private_data;
	down_read(&data->sem);
	if (data->vma) {
		*start = data->vma->vm_start;
		*len = data->vma->vm_end - data->vma->vm_start;
	} else {
		*start = 0;
		*len = 0;
	}
	up_read(&data->sem);
	return 0;
}
705
706int get_pmem_addr(struct file *file, unsigned long *start,
707 unsigned long *vstart, unsigned long *len)
708{
709 struct pmem_data *data;
710 int id;
711
df16b962 712 if (!is_pmem_file(file) || !has_allocation(file))
9b843757 713 return -1;
9b843757
PM
714
715 data = (struct pmem_data *)file->private_data;
716 if (data->index == -1) {
717#if PMEM_DEBUG
718 printk(KERN_INFO "pmem: requested pmem data from file with no "
719 "allocation.\n");
720 return -1;
721#endif
722 }
723 id = get_id(file);
724
725 down_read(&data->sem);
726 *start = pmem_start_addr(id, data);
727 *len = pmem_len(id, data);
728 *vstart = (unsigned long)pmem_start_vaddr(id, data);
729 up_read(&data->sem);
730#if PMEM_DEBUG
731 down_write(&data->sem);
732 data->ref++;
733 up_write(&data->sem);
734#endif
735 return 0;
736}
737
/*
 * Kernel-internal: resolve an fd to its pmem addresses and take a
 * reference on the struct file. On success the fget reference is kept
 * for the caller, who must balance it with put_pmem_file.
 */
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
{
	struct file *file;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		printk(KERN_INFO "pmem: requested data from file descriptor "
		       "that doesn't exist.");
		return -1;
	}

	if (get_pmem_addr(file, start, vstart, len))
		goto end;

	if (filp)
		*filp = file;
	return 0;
end:
	/* not a pmem file (or no allocation): drop the fget reference */
	fput(file);
	return -1;
}
760
/*
 * Drop the file reference taken by get_pmem_file; debug builds also
 * verify the get/put counts balance.
 */
void put_pmem_file(struct file *file)
{
	struct pmem_data *data;
	int id;

	if (!is_pmem_file(file))
		return;
	id = get_id(file);
	data = (struct pmem_data *)file->private_data;
#if PMEM_DEBUG
	down_write(&data->sem);
	if (data->ref == 0) {
		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
		       pmem[id].dev.name, data->pid);
		BUG();
	}
	data->ref--;
	up_write(&data->sem);
#endif
	fput(file);
}
782
/*
 * Flush the CPU data cache for (part of) a cached pmem fd: the whole
 * allocation for plain fds, or just the suballocated region covering
 * [offset, offset+len) for connected fds. No-op for uncached devices.
 */
void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
{
	struct pmem_data *data;
	int id;
	void *vaddr;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;

	if (!is_pmem_file(file) || !has_allocation(file))
		return;

	id = get_id(file);
	data = (struct pmem_data *)file->private_data;
	if (!pmem[id].cached)
		return;

	down_read(&data->sem);
	vaddr = pmem_start_vaddr(id, data);
	/* if this isn't a submmapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
		goto end;
	}
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		/* flush the first recorded region containing the range */
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
			break;
		}
	}
end:
	up_read(&data->sem);
}
822
/*
 * PMEM_CONNECT backend: attach 'file' as a client of the master pmem fd
 * 'connect', sharing the master's allocation index. Returns 0 or -EINVAL.
 */
static int pmem_connect(unsigned long connect, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_data *src_data;
	struct file *src_file;
	int ret = 0, put_needed;

	down_write(&data->sem);
	/* retrieve the src file and check it is a pmem file with an alloc */
	src_file = fget_light(connect, &put_needed);
	DLOG("connect %p to %p\n", file, src_file);
	if (!src_file) {
		printk(KERN_INFO "pmem: src file not found!\n");
		ret = -EINVAL;
		goto err_no_file;
	}
	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
		       "alloc!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	src_data = (struct pmem_data *)src_file->private_data;

	/* a client may only be connected to one master allocation */
	if (has_allocation(file) && (data->index != src_data->index)) {
		printk(KERN_INFO "pmem: file is already mapped but doesn't "
		       "match this src_file!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	data->index = src_data->index;
	data->flags |= PMEM_FLAGS_CONNECTED;
	data->master_fd = connect;
	data->master_file = src_file;

err_bad_file:
	fput_light(src_file, put_needed);
err_no_file:
	up_write(&data->sem);
	return ret;
}
864
865static void pmem_unlock_data_and_mm(struct pmem_data *data,
866 struct mm_struct *mm)
867{
868 up_write(&data->sem);
869 if (mm != NULL) {
870 up_write(&mm->mmap_sem);
871 mmput(mm);
872 }
873}
874
875static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
876 struct mm_struct **locked_mm)
877{
878 int ret = 0;
879 struct mm_struct *mm = NULL;
880 *locked_mm = NULL;
881lock_mm:
882 down_read(&data->sem);
883 if (PMEM_IS_SUBMAP(data)) {
884 mm = get_task_mm(data->task);
885 if (!mm) {
886#if PMEM_DEBUG
df16b962 887 printk(KERN_DEBUG "pmem: can't remap task is gone!\n");
9b843757
PM
888#endif
889 up_read(&data->sem);
890 return -1;
891 }
892 }
893 up_read(&data->sem);
894
895 if (mm)
896 down_write(&mm->mmap_sem);
897
898 down_write(&data->sem);
899 /* check that the file didn't get mmaped before we could take the
900 * data sem, this should be safe b/c you can only submap each file
901 * once */
902 if (PMEM_IS_SUBMAP(data) && !mm) {
903 pmem_unlock_data_and_mm(data, mm);
904 up_write(&data->sem);
905 goto lock_mm;
906 }
907 /* now check that vma.mm is still there, it could have been
908 * deleted by vma_close before we could get the data->sem */
909 if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
910 /* might as well release this */
911 if (data->flags & PMEM_FLAGS_SUBMAP) {
912 put_task_struct(data->task);
913 data->task = NULL;
914 /* lower the submap flag to show the mm is gone */
915 data->flags &= ~(PMEM_FLAGS_SUBMAP);
916 }
917 pmem_unlock_data_and_mm(data, mm);
918 return -1;
919 }
920 *locked_mm = mm;
921 return ret;
922}
923
/*
 * PMEM_MAP/PMEM_UNMAP backend: add or remove a page-aligned suballocated
 * region of a connected fd, updating any live submap vma accordingly.
 * Only the owner of the master fd may remap its clients.
 */
int pmem_remap(struct pmem_region *region, struct file *file,
	       unsigned operation)
{
	int ret;
	struct pmem_region_node *region_node;
	struct mm_struct *mm = NULL;
	struct list_head *elt, *elt2;
	int id = get_id(file);
	struct pmem_data *data = (struct pmem_data *)file->private_data;

	/* pmem region must be aligned on a page boundry */
	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
		     !PMEM_IS_PAGE_ALIGNED(region->len))) {
#if PMEM_DEBUG
		printk(KERN_DEBUG "pmem: request for unaligned pmem "
		       "suballocation %lx %lx\n", region->offset, region->len);
#endif
		return -EINVAL;
	}

	/* if userspace requests a region of len 0, there's nothing to do */
	if (region->len == 0)
		return 0;

	/* lock the mm and data */
	ret = pmem_lock_data_and_mm(file, data, &mm);
	if (ret)
		return 0;

	/* only the owner of the master file can remap the client fds
	 * that back in it */
	if (!is_master_owner(file)) {
#if PMEM_DEBUG
		printk("pmem: remap requested from non-master process\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	/* check that the requested range is within the src allocation */
	if (unlikely((region->offset > pmem_len(id, data)) ||
		     (region->len > pmem_len(id, data)) ||
		     (region->offset + region->len > pmem_len(id, data)))) {
#if PMEM_DEBUG
		printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	if (operation == PMEM_MAP) {
		/* record the new region on the fd's region list */
		region_node = kmalloc(sizeof(struct pmem_region_node),
				      GFP_KERNEL);
		if (!region_node) {
			ret = -ENOMEM;
#if PMEM_DEBUG
			printk(KERN_INFO "No space to allocate metadata!");
#endif
			goto err;
		}
		region_node->region = *region;
		list_add(&region_node->list, &data->region_list);
	} else if (operation == PMEM_UNMAP) {
		/* drop every recorded region matching offset and len */
		int found = 0;
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			if (region->len == 0 ||
			    (region_node->region.offset == region->offset &&
			     region_node->region.len == region->len)) {
				list_del(elt);
				kfree(region_node);
				found = 1;
			}
		}
		if (!found) {
#if PMEM_DEBUG
			printk("pmem: Unmap region does not map any mapped "
			       "region!");
#endif
			ret = -EINVAL;
			goto err;
		}
	}

	/* if the fd is currently submapped, apply the change to the vma */
	if (data->vma && PMEM_IS_SUBMAP(data)) {
		if (operation == PMEM_MAP)
			ret = pmem_remap_pfn_range(id, data->vma, data,
						   region->offset, region->len);
		else if (operation == PMEM_UNMAP)
			ret = pmem_unmap_pfn_range(id, data->vma, data,
						   region->offset, region->len);
	}

err:
	pmem_unlock_data_and_mm(data, mm);
	return ret;
}
1022
/*
 * Tear down a client submap on behalf of its master: unmap every
 * suballocated region (backfilling with garbage pages) and free the
 * region bookkeeping. Called while the master fd is being released.
 */
static void pmem_revoke(struct file *file, struct pmem_data *data)
{
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	int ret = 0;

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	if (ret)
		return;
	/* unmap everything */
	/* delete the regions and region list nothing is mapped any more */
	if (data->vma)
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			pmem_unmap_pfn_range(id, data->vma, data,
					     region_node->region.offset,
					     region_node->region.len);
			list_del(elt);
			kfree(region_node);
		}
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
}
1053
1054static void pmem_get_size(struct pmem_region *region, struct file *file)
1055{
1056 struct pmem_data *data = (struct pmem_data *)file->private_data;
1057 int id = get_id(file);
1058
1059 if (!has_allocation(file)) {
1060 region->offset = 0;
1061 region->len = 0;
1062 return;
1063 } else {
1064 region->offset = pmem_start_addr(id, data);
1065 region->len = pmem_len(id, data);
1066 }
1067 DLOG("offset %lx len %lx\n", region->offset, region->len);
1068}
1069
1070
1071static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1072{
1073 struct pmem_data *data;
1074 int id = get_id(file);
1075
1076 switch (cmd) {
1077 case PMEM_GET_PHYS:
1078 {
1079 struct pmem_region region;
1080 DLOG("get_phys\n");
1081 if (!has_allocation(file)) {
1082 region.offset = 0;
1083 region.len = 0;
1084 } else {
1085 data = (struct pmem_data *)file->private_data;
1086 region.offset = pmem_start_addr(id, data);
1087 region.len = pmem_len(id, data);
1088 }
eb450e89
CC
1089 printk(KERN_INFO "pmem: request for physical address "
1090 "of pmem region from process %d.\n", current->pid);
9b843757
PM
1091 if (copy_to_user((void __user *)arg, &region,
1092 sizeof(struct pmem_region)))
1093 return -EFAULT;
1094 break;
1095 }
1096 case PMEM_MAP:
1097 {
1098 struct pmem_region region;
1099 if (copy_from_user(&region, (void __user *)arg,
1100 sizeof(struct pmem_region)))
1101 return -EFAULT;
1102 data = (struct pmem_data *)file->private_data;
1103 return pmem_remap(&region, file, PMEM_MAP);
1104 }
1105 break;
1106 case PMEM_UNMAP:
1107 {
1108 struct pmem_region region;
1109 if (copy_from_user(&region, (void __user *)arg,
1110 sizeof(struct pmem_region)))
1111 return -EFAULT;
1112 data = (struct pmem_data *)file->private_data;
1113 return pmem_remap(&region, file, PMEM_UNMAP);
1114 break;
1115 }
1116 case PMEM_GET_SIZE:
1117 {
1118 struct pmem_region region;
1119 DLOG("get_size\n");
1120 pmem_get_size(&region, file);
1121 if (copy_to_user((void __user *)arg, &region,
1122 sizeof(struct pmem_region)))
1123 return -EFAULT;
1124 break;
1125 }
1126 case PMEM_GET_TOTAL_SIZE:
1127 {
1128 struct pmem_region region;
1129 DLOG("get total size\n");
1130 region.offset = 0;
1131 get_id(file);
1132 region.len = pmem[id].size;
1133 if (copy_to_user((void __user *)arg, &region,
1134 sizeof(struct pmem_region)))
1135 return -EFAULT;
1136 break;
1137 }
1138 case PMEM_ALLOCATE:
1139 {
1140 if (has_allocation(file))
1141 return -EINVAL;
1142 data = (struct pmem_data *)file->private_data;
1143 data->index = pmem_allocate(id, arg);
1144 break;
1145 }
1146 case PMEM_CONNECT:
1147 DLOG("connect\n");
1148 return pmem_connect(arg, file);
1149 break;
1150 default:
1151 if (pmem[id].ioctl)
1152 return pmem[id].ioctl(file, cmd, arg);
1153 return -EINVAL;
1154 }
1155 return 0;
1156}
1157
1158#if PMEM_DEBUG
1159static ssize_t debug_open(struct inode *inode, struct file *file)
1160{
1161 file->private_data = inode->i_private;
1162 return 0;
1163}
1164
1165static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
1166 loff_t *ppos)
1167{
1168 struct list_head *elt, *elt2;
1169 struct pmem_data *data;
1170 struct pmem_region_node *region_node;
1171 int id = (int)file->private_data;
1172 const int debug_bufmax = 4096;
1173 static char buffer[4096];
1174 int n = 0;
1175
1176 DLOG("debug open\n");
1177 n = scnprintf(buffer, debug_bufmax,
1178 "pid #: mapped regions (offset, len) (offset,len)...\n");
1179
1180 down(&pmem[id].data_list_sem);
1181 list_for_each(elt, &pmem[id].data_list) {
1182 data = list_entry(elt, struct pmem_data, list);
1183 down_read(&data->sem);
1184 n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
1185 data->pid);
1186 list_for_each(elt2, &data->region_list) {
1187 region_node = list_entry(elt2, struct pmem_region_node,
1188 list);
1189 n += scnprintf(buffer + n, debug_bufmax - n,
1190 "(%lx,%lx) ",
1191 region_node->region.offset,
1192 region_node->region.len);
1193 }
1194 n += scnprintf(buffer + n, debug_bufmax - n, "\n");
1195 up_read(&data->sem);
1196 }
1197 up(&pmem[id].data_list_sem);
1198
1199 n++;
1200 buffer[n] = 0;
1201 return simple_read_from_buffer(buf, count, ppos, buffer, n);
1202}
1203
1204static struct file_operations debug_fops = {
1205 .read = debug_read,
1206 .open = debug_open,
1207};
1208#endif
1209
#if 0
/* Dead code: each pmem region registers its own miscdevice in
 * pmem_setup(), so this single shared device is never used. */
static struct miscdevice pmem_dev = {
	.name = "pmem",
	.fops = &pmem_fops,
};
#endif
1216
1217int pmem_setup(struct android_pmem_platform_data *pdata,
1218 long (*ioctl)(struct file *, unsigned int, unsigned long),
1219 int (*release)(struct inode *, struct file *))
1220{
1221 int err = 0;
1222 int i, index = 0;
1223 int id = id_count;
1224 id_count++;
1225
1226 pmem[id].no_allocator = pdata->no_allocator;
1227 pmem[id].cached = pdata->cached;
1228 pmem[id].buffered = pdata->buffered;
1229 pmem[id].base = pdata->start;
1230 pmem[id].size = pdata->size;
1231 pmem[id].ioctl = ioctl;
1232 pmem[id].release = release;
1233 init_rwsem(&pmem[id].bitmap_sem);
1234 init_MUTEX(&pmem[id].data_list_sem);
1235 INIT_LIST_HEAD(&pmem[id].data_list);
1236 pmem[id].dev.name = pdata->name;
1237 pmem[id].dev.minor = id;
1238 pmem[id].dev.fops = &pmem_fops;
1239 printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
1240
1241 err = misc_register(&pmem[id].dev);
1242 if (err) {
1243 printk(KERN_ALERT "Unable to register pmem driver!\n");
1244 goto err_cant_register_device;
1245 }
1246 pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
1247
7a6cb0d5 1248 pmem[id].bitmap = kcalloc(pmem[id].num_entries,
9b843757
PM
1249 sizeof(struct pmem_bits), GFP_KERNEL);
1250 if (!pmem[id].bitmap)
1251 goto err_no_mem_for_metadata;
1252
9b843757
PM
1253 for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
1254 if ((pmem[id].num_entries) & 1<<i) {
1255 PMEM_ORDER(id, index) = i;
1256 index = PMEM_NEXT_INDEX(id, index);
1257 }
1258 }
1259
1260 if (pmem[id].cached)
1261 pmem[id].vbase = ioremap_cached(pmem[id].base,
1262 pmem[id].size);
1263#ifdef ioremap_ext_buffered
1264 else if (pmem[id].buffered)
1265 pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
1266 pmem[id].size);
1267#endif
1268 else
1269 pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
1270
1271 if (pmem[id].vbase == 0)
1272 goto error_cant_remap;
1273
1274 pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
1275 if (pmem[id].no_allocator)
1276 pmem[id].allocated = 0;
1277
1278#if PMEM_DEBUG
1279 debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
1280 &debug_fops);
1281#endif
1282 return 0;
1283error_cant_remap:
1284 kfree(pmem[id].bitmap);
1285err_no_mem_for_metadata:
1286 misc_deregister(&pmem[id].dev);
1287err_cant_register_device:
1288 return -1;
1289}
1290
1291static int pmem_probe(struct platform_device *pdev)
1292{
1293 struct android_pmem_platform_data *pdata;
1294
1295 if (!pdev || !pdev->dev.platform_data) {
1296 printk(KERN_ALERT "Unable to probe pmem!\n");
1297 return -1;
1298 }
1299 pdata = pdev->dev.platform_data;
1300 return pmem_setup(pdata, NULL, NULL);
1301}
1302
1303
1304static int pmem_remove(struct platform_device *pdev)
1305{
1306 int id = pdev->id;
1307 __free_page(pfn_to_page(pmem[id].garbage_pfn));
1308 misc_deregister(&pmem[id].dev);
1309 return 0;
1310}
1311
/* Binds probe/remove to devices named "android_pmem" on the platform bus. */
static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem" }
};
1317
1318
1319static int __init pmem_init(void)
1320{
1321 return platform_driver_register(&pmem_driver);
1322}
1323
1324static void __exit pmem_exit(void)
1325{
1326 platform_driver_unregister(&pmem_driver);
1327}
1328
/* Standard module entry/exit hooks. */
module_init(pmem_init);
module_exit(pmem_exit);
1331