/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* Same behaviour as i386: PAT is always set to cached and the MTRRs
	   control the caching behaviour.
	   Hopefully a full PAT implementation will fix that soon. */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
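
/*
 * Note: a nonzero return from uncached_access() makes the default
 * phys_mem_access_prot() below map the range with pgprot_noncached(),
 * so such /dev/mem mappings bypass the CPU cache.
 */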

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (addr >= end_mem)
		return 0;

	if (*count > end_mem - addr)
		*count = end_mem - addr;

	return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
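		/*
		 * (-p & (PAGE_SIZE - 1)) is the distance from p to the next
		 * page boundary: with 4 KiB pages and p == 0x1f40 it is
		 * 0xc0 (192) bytes, so the first chunk ends exactly on the
		 * page edge.
		 */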
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
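 *
 * This is the /dev/oldmem path used by kdump: reads go a page at a time
 * through copy_oldmem_page() and stop once the requested pfn lies beyond
 * saved_max_pfn, the highest page frame recorded for the crashed kernel.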
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
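 * Lowmem (below high_memory) is copied straight out of the kernel's direct
 * mapping; anything above that is treated as vmalloc space and fetched
 * page by page through vread().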
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					ssize_t ret;

					free_page((unsigned long)kbuf);
					ret = wrote + virtr + (len - written);
					return ret ? ret : -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */
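
/*
 * Example (userspace): mapping /dev/zero MAP_PRIVATE is the classic way to
 * obtain a zero-filled, copy-on-write region, and is what the MMU version
 * of mmap_zero() above services; MAP_SHARED mappings are instead backed by
 * shmem via shmem_zero_setup().
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);
 *
 * Writes through p then fault in private, anonymous-style zero pages.
 */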

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
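
/*
 * The force_successful_syscall_return() calls below keep architectures that
 * report syscall errors through the return value range (e.g. ia64) from
 * mistaking a legitimately huge offset for an -errno value.
 */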
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}

static struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};
895 | ||
896 | static const struct { | |
897 | unsigned int minor; | |
898 | char *name; | |
899 | umode_t mode; | |
900 | struct file_operations *fops; | |
901 | } devlist[] = { /* list of minor devices */ | |
902 | {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, | |
903 | {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, | |
904 | {3, "null", S_IRUGO | S_IWUGO, &null_fops}, | |
145d01e4 | 905 | #if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)) |
1da177e4 LT |
906 | {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, |
907 | #endif | |
908 | {5, "zero", S_IRUGO | S_IWUGO, &zero_fops}, | |
909 | {7, "full", S_IRUGO | S_IWUGO, &full_fops}, | |
910 | {8, "random", S_IRUGO | S_IWUSR, &random_fops}, | |
911 | {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}, | |
912 | {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops}, | |
50b1fdbd VG |
913 | #ifdef CONFIG_CRASH_DUMP |
914 | {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops}, | |
915 | #endif | |
1da177e4 LT |
916 | }; |
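
/*
 * These are the classic character-major-1 minors, i.e. /dev/mem is c 1 1,
 * /dev/null is c 1 3, /dev/zero is c 1 5, /dev/full is c 1 7 and
 * /dev/urandom is c 1 9.
 */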

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++) {
		class_device_create(mem_class, NULL,
					MKDEV(MEM_MAJOR, devlist[i].minor),
					NULL, devlist[i].name);
		devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
				S_IFCHR | devlist[i].mode, devlist[i].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);