/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
                             unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
                          unsigned long prot, unsigned long flags,
                          unsigned long fd, unsigned long pgoff)
{
        /*
         * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
         * so we need to shift the argument down by 1; m68k mmap64(3)
         * (in libc) expects the last argument of mmap2 in 4Kb units.
         */
        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

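/*
 * Usage sketch (hypothetical userspace caller, not part of this file):
 * on m68k, mmap64(3) hands the file offset to mmap2 in 4Kb units, so a
 * 3 MiB file offset becomes a pgoff argument of (3 << 20) >> 12:
 *
 *      off_t off = 3 << 20;
 *      void *p = (void *)syscall(__NR_mmap2, NULL, 8192, PROT_READ,
 *                                MAP_SHARED, fd, (unsigned long)(off >> 12));
 */
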
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
        unsigned long _mmusr, _paddr;                                   \
                                                                        \
        __asm__ __volatile__ (".chip 68040\n\t"                         \
                              "ptestr (%1)\n\t"                         \
                              "movec %%mmusr,%0\n\t"                    \
                              ".chip 68k"                               \
                              : "=r" (_mmusr)                           \
                              : "a" (vaddr));                           \
        _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;       \
        _paddr;                                                         \
})
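
/*
 * On the 68040, PTESTR probes the MMU for a read access at the given
 * address and latches the result in the MMUSR register; MMU_R_040 is
 * the "resident" bit, so virt_to_phys_040() yields the physical page
 * for a mapped address and 0 for an unmapped one, which the flush
 * loops below use to skip unmapped pages.
 */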
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr)                         \
({                                                      \
        unsigned long paddr;                            \
        __asm__ __volatile__ (".chip 68060\n\t"         \
                              "plpar (%0)\n\t"          \
                              ".chip 68k"               \
                              : "=a" (paddr)            \
                              : "0" (vaddr));           \
        (paddr); /* XXX */                              \
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
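                        /* On the '020/'030, setting the CACR "clear
                           entry" bits (CEI = 0x4 for the icache,
                           CED = 0x400 for the dcache) invalidates the
                           single line addressed by the CAAR register,
                           so a short range can be flushed line by
                           line.  */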
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
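                        /* Here the CACR "clear cache" bits (CI = 0x8,
                           CD = 0x800) invalidate the entire instruction
                           and data caches in one go.  */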
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */

                if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out:
        unlock_kernel();
        return ret;
}
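
/*
 * Usage sketch (hypothetical userspace JIT, not part of this file):
 * after writing freshly generated machine code, flush both caches over
 * the affected range before jumping to it:
 *
 *      #include <asm/cachectl.h>
 *
 *      if (syscall(__NR_cacheflush, (unsigned long)code,
 *                  FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, codelen) < 0)
 *              perror("cacheflush");
 */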

asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
                  const char *const argv[],
                  const char *const envp[])
{
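        /* The m68k syscall convention: number in %d0, the first three
           arguments in %d1-%d3, "trap #0" enters the kernel, and the
           return value comes back in %d0.  */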
        register long __res asm ("%d0") = __NR_execve;
        register long __a asm ("%d1") = (long)(filename);
        register long __b asm ("%d2") = (long)(argv);
        register long __c asm ("%d3") = (long)(envp);
        asm volatile ("trap #0" : "+d" (__res)
                        : "d" (__a), "d" (__b), "d" (__c));
        return __res;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
        return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
        current_thread_info()->tp_value = tp;
        return 0;
}
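
/*
 * Usage sketch (hypothetical userspace TLS setup, not part of this
 * file): a threading library can stash its thread pointer with
 * set_thread_area and fetch it back later with get_thread_area:
 *
 *      syscall(__NR_set_thread_area, (unsigned long)tls_block);
 *      ...
 *      void *tls = (void *)syscall(__NR_get_thread_area);
 */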

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                      unsigned long __user * mem)
{
        /* This was borrowed from ARM's implementation.  */
        for (;;) {
                struct mm_struct *mm = current->mm;
                pgd_t *pgd;
                pmd_t *pmd;
                pte_t *pte;
                spinlock_t *ptl;
                unsigned long mem_value;

                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, (unsigned long)mem);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, (unsigned long)mem);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
                if (!pte_present(*pte) || !pte_dirty(*pte)
                    || !pte_write(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }

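                /* The pte was just verified present, dirty and writable
                   under the page-table lock, so the plain user-space
                   access below cannot fault while the lock is held.  */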
                mem_value = *mem;
                if (mem_value == oldval)
                        *mem = newval;

                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return mem_value;

        bad_access:
                up_read(&mm->mmap_sem);
                /* This is not necessarily a bad access: we can also get
                   here when the memory we are trying to write to is
                   marked copy-on-write.  Make the kernel do the
                   necessary page work, then re-iterate.  Simulate a
                   write access fault to do that.  */
                {
                        /* The first argument of the function corresponds
                           to D1, which is the first field of struct
                           pt_regs.  */
                        struct pt_regs *fp = (struct pt_regs *)&newval;

                        /* '3' is an RMW flag.  */
                        if (do_page_fault(fp, (unsigned long)mem, 3))
                                /* If do_page_fault() failed, we do not
                                   have anything meaningful to return.
                                   There should be a SIGSEGV pending for
                                   the process.  */
                                return 0xdeadbeef;
                }
        }
}
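
/*
 * Usage sketch (hypothetical userspace caller, not part of this file):
 * mem must arrive in %a0, which the generic syscall(3) wrapper cannot
 * arrange, so a lock implementation would use inline asm roughly like:
 *
 *      register long res  asm ("%d0") = __NR_atomic_cmpxchg_32;
 *      register long new  asm ("%d1") = 1;            // newval
 *      register long old  asm ("%d2") = 0;            // oldval
 *      register long addr asm ("%a0") = (long)&lock;  // mem
 *      asm volatile ("trap #0"
 *                    : "+d" (res)
 *                    : "d" (new), "d" (old), "a" (addr));
 *      // res now holds the previous value of the lock word
 */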

asmlinkage int sys_atomic_barrier(void)
{
        /* no code needed for uniprocs */
        return 0;
}