]> bbs.cooldavid.org Git - net-next-2.6.git/blame - arch/mips/kernel/vpe.c
MIPS: VPE: Free relocation chain on error.
[net-next-2.6.git] / arch / mips / kernel / vpe.c
CommitLineData
e01402b1
RB
1/*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
e01402b1
RB
16 */
17
18/*
19 * VPE support module
20 *
21 * Provides support for loading a MIPS SP program on VPE1.
22 * The SP environment is rather simple, no TLBs. It needs to be relocatable
23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
26 *
27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
28 * i.e. cat spapp >/dev/vpe1.
e01402b1 29 */
e01402b1 30#include <linux/kernel.h>
27a3bbaf 31#include <linux/device.h>
e01402b1
RB
32#include <linux/module.h>
33#include <linux/fs.h>
34#include <linux/init.h>
35#include <asm/uaccess.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/vmalloc.h>
39#include <linux/elf.h>
40#include <linux/seq_file.h>
7558da94 41#include <linux/smp_lock.h>
e01402b1
RB
42#include <linux/syscalls.h>
43#include <linux/moduleloader.h>
44#include <linux/interrupt.h>
45#include <linux/poll.h>
46#include <linux/bootmem.h>
47#include <asm/mipsregs.h>
340ee4b9 48#include <asm/mipsmtregs.h>
e01402b1
RB
49#include <asm/cacheflush.h>
50#include <asm/atomic.h>
51#include <asm/cpu.h>
27a3bbaf 52#include <asm/mips_mt.h>
e01402b1
RB
53#include <asm/processor.h>
54#include <asm/system.h>
2600990e
RB
55#include <asm/vpe.h>
56#include <asm/kspd.h>
e01402b1
RB
57
58typedef void *vpe_handle;
59
e01402b1
RB
60#ifndef ARCH_SHF_SMALL
61#define ARCH_SHF_SMALL 0
62#endif
63
64/* If this is set, the section belongs in the init part of the module */
65#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
66
41790e04
RB
67/*
68 * The number of TCs and VPEs physically available on the core
69 */
70static int hw_tcs, hw_vpes;
e01402b1 71static char module_name[] = "vpe";
307bd284 72static int major;
27a3bbaf 73static const int minor = 1; /* fixed for now */
e01402b1 74
2600990e
RB
75#ifdef CONFIG_MIPS_APSP_KSPD
76 static struct kspd_notifications kspd_events;
77static int kspd_events_reqd = 0;
78#endif
79
e01402b1
RB
80/* grab the likely amount of memory we will need. */
81#ifdef CONFIG_MIPS_VPE_LOADER_TOM
82#define P_SIZE (2 * 1024 * 1024)
83#else
84/* add an overhead to the max kmalloc size for non-striped symbols/etc */
85#define P_SIZE (256 * 1024)
86#endif
87
2600990e
RB
88extern unsigned long physical_memsize;
89
e01402b1 90#define MAX_VPES 16
2600990e 91#define VPE_PATH_MAX 256
e01402b1
RB
92
93enum vpe_state {
94 VPE_STATE_UNUSED = 0,
95 VPE_STATE_INUSE,
96 VPE_STATE_RUNNING
97};
98
99enum tc_state {
100 TC_STATE_UNUSED = 0,
101 TC_STATE_INUSE,
102 TC_STATE_RUNNING,
103 TC_STATE_DYNAMIC
104};
105
307bd284 106struct vpe {
e01402b1
RB
107 enum vpe_state state;
108
109 /* (device) minor associated with this vpe */
110 int minor;
111
112 /* elfloader stuff */
113 void *load_addr;
571e0bed 114 unsigned long len;
e01402b1 115 char *pbuffer;
571e0bed 116 unsigned long plen;
2600990e
RB
117 unsigned int uid, gid;
118 char cwd[VPE_PATH_MAX];
e01402b1
RB
119
120 unsigned long __start;
121
122 /* tc's associated with this vpe */
123 struct list_head tc;
124
125 /* The list of vpe's */
126 struct list_head list;
127
128 /* shared symbol address */
129 void *shared_ptr;
2600990e
RB
130
131 /* the list of who wants to know when something major happens */
132 struct list_head notify;
41790e04
RB
133
134 unsigned int ntcs;
307bd284
RB
135};
136
137struct tc {
138 enum tc_state state;
139 int index;
140
07cc0c9e
RB
141 struct vpe *pvpe; /* parent VPE */
142 struct list_head tc; /* The list of TC's with this VPE */
143 struct list_head list; /* The global list of tc's */
307bd284 144};
e01402b1 145
9cfdf6f1 146struct {
e01402b1
RB
147 /* Virtual processing elements */
148 struct list_head vpe_list;
149
150 /* Thread contexts */
151 struct list_head tc_list;
9cfdf6f1
RB
152} vpecontrol = {
153 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155};
e01402b1
RB
156
157static void release_progmem(void *ptr);
e01402b1
RB
158extern void save_gp_address(unsigned int secbase, unsigned int rel);
159
160/* get the vpe associated with this minor */
161struct vpe *get_vpe(int minor)
162{
163 struct vpe *v;
164
2600990e
RB
165 if (!cpu_has_mipsmt)
166 return NULL;
167
e01402b1
RB
168 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
169 if (v->minor == minor)
170 return v;
171 }
172
e01402b1
RB
173 return NULL;
174}
175
176/* get the vpe associated with this minor */
177struct tc *get_tc(int index)
178{
179 struct tc *t;
180
181 list_for_each_entry(t, &vpecontrol.tc_list, list) {
182 if (t->index == index)
183 return t;
184 }
185
e01402b1
RB
186 return NULL;
187}
188
189struct tc *get_tc_unused(void)
190{
191 struct tc *t;
192
193 list_for_each_entry(t, &vpecontrol.tc_list, list) {
194 if (t->state == TC_STATE_UNUSED)
195 return t;
196 }
197
e01402b1
RB
198 return NULL;
199}
200
201/* allocate a vpe and associate it with this minor (or index) */
202struct vpe *alloc_vpe(int minor)
203{
204 struct vpe *v;
205
307bd284 206 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
e01402b1
RB
207 return NULL;
208 }
209
e01402b1
RB
210 INIT_LIST_HEAD(&v->tc);
211 list_add_tail(&v->list, &vpecontrol.vpe_list);
212
2600990e 213 INIT_LIST_HEAD(&v->notify);
e01402b1
RB
214 v->minor = minor;
215 return v;
216}
217
218/* allocate a tc. At startup only tc0 is running, all other can be halted. */
219struct tc *alloc_tc(int index)
220{
07cc0c9e 221 struct tc *tc;
e01402b1 222
07cc0c9e
RB
223 if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
224 goto out;
e01402b1 225
07cc0c9e
RB
226 INIT_LIST_HEAD(&tc->tc);
227 tc->index = index;
228 list_add_tail(&tc->list, &vpecontrol.tc_list);
e01402b1 229
07cc0c9e
RB
230out:
231 return tc;
e01402b1
RB
232}
233
234/* clean up and free everything */
235void release_vpe(struct vpe *v)
236{
237 list_del(&v->list);
238 if (v->load_addr)
239 release_progmem(v);
240 kfree(v);
241}
242
243void dump_mtregs(void)
244{
245 unsigned long val;
246
247 val = read_c0_config3();
248 printk("config3 0x%lx MT %ld\n", val,
249 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
250
e01402b1
RB
251 val = read_c0_mvpcontrol();
252 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
253 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
254 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
255 (val & MVPCONTROL_EVP));
256
2600990e
RB
257 val = read_c0_mvpconf0();
258 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
259 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
260 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
e01402b1
RB
261}
262
263/* Find some VPE program space */
571e0bed 264static void *alloc_progmem(unsigned long len)
e01402b1 265{
5408c490
RB
266 void *addr;
267
e01402b1 268#ifdef CONFIG_MIPS_VPE_LOADER_TOM
5408c490
RB
269 /*
270 * This means you must tell Linux to use less memory than you
271 * physically have, for example by passing a mem= boot argument.
272 */
9f2546ad 273 addr = pfn_to_kaddr(max_low_pfn);
5408c490 274 memset(addr, 0, len);
e01402b1 275#else
5408c490
RB
276 /* simple grab some mem for now */
277 addr = kzalloc(len, GFP_KERNEL);
e01402b1 278#endif
5408c490
RB
279
280 return addr;
e01402b1
RB
281}
282
283static void release_progmem(void *ptr)
284{
285#ifndef CONFIG_MIPS_VPE_LOADER_TOM
286 kfree(ptr);
287#endif
288}
289
290/* Update size with this section: return offset. */
291static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
292{
293 long ret;
294
295 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
296 *size = ret + sechdr->sh_size;
297 return ret;
298}
299
300/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
301 might -- code, read-only data, read-write data, small data. Tally
302 sizes, and place the offsets into sh_entsize fields: high bit means it
303 belongs in init. */
304static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
305 Elf_Shdr * sechdrs, const char *secstrings)
306{
307 static unsigned long const masks[][2] = {
308 /* NOTE: all executable code must be the first section
309 * in this array; otherwise modify the text_size
310 * finder in the two loops below */
311 {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
312 {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
313 {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
314 {ARCH_SHF_SMALL | SHF_ALLOC, 0}
315 };
316 unsigned int m, i;
317
318 for (i = 0; i < hdr->e_shnum; i++)
319 sechdrs[i].sh_entsize = ~0UL;
320
321 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
322 for (i = 0; i < hdr->e_shnum; ++i) {
323 Elf_Shdr *s = &sechdrs[i];
324
325 // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
326 if ((s->sh_flags & masks[m][0]) != masks[m][0]
327 || (s->sh_flags & masks[m][1])
328 || s->sh_entsize != ~0UL)
329 continue;
e2a9cf96
RG
330 s->sh_entsize =
331 get_offset((unsigned long *)&mod->core_size, s);
e01402b1
RB
332 }
333
334 if (m == 0)
335 mod->core_text_size = mod->core_size;
336
337 }
338}
339
340
341/* from module-elf32.c, but subverted a little */
342
343struct mips_hi16 {
344 struct mips_hi16 *next;
345 Elf32_Addr *addr;
346 Elf32_Addr value;
347};
348
349static struct mips_hi16 *mips_hi16_list;
350static unsigned int gp_offs, gp_addr;
351
352static int apply_r_mips_none(struct module *me, uint32_t *location,
353 Elf32_Addr v)
354{
355 return 0;
356}
357
358static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
359 Elf32_Addr v)
360{
361 int rel;
362
363 if( !(*location & 0xffff) ) {
364 rel = (int)v - gp_addr;
365 }
366 else {
367 /* .sbss + gp(relative) + offset */
368 /* kludge! */
369 rel = (int)(short)((int)v + gp_offs +
370 (int)(short)(*location & 0xffff) - gp_addr);
371 }
372
373 if( (rel > 32768) || (rel < -32768) ) {
2600990e
RB
374 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
375 "relative address 0x%x out of range of gp register\n",
376 rel);
e01402b1
RB
377 return -ENOEXEC;
378 }
379
380 *location = (*location & 0xffff0000) | (rel & 0xffff);
381
382 return 0;
383}
384
385static int apply_r_mips_pc16(struct module *me, uint32_t *location,
386 Elf32_Addr v)
387{
388 int rel;
389 rel = (((unsigned int)v - (unsigned int)location));
390 rel >>= 2; // because the offset is in _instructions_ not bytes.
391 rel -= 1; // and one instruction less due to the branch delay slot.
392
393 if( (rel > 32768) || (rel < -32768) ) {
2600990e
RB
394 printk(KERN_DEBUG "VPE loader: "
395 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
e01402b1
RB
396 return -ENOEXEC;
397 }
398
399 *location = (*location & 0xffff0000) | (rel & 0xffff);
400
401 return 0;
402}
403
404static int apply_r_mips_32(struct module *me, uint32_t *location,
405 Elf32_Addr v)
406{
407 *location += v;
408
409 return 0;
410}
411
412static int apply_r_mips_26(struct module *me, uint32_t *location,
413 Elf32_Addr v)
414{
415 if (v % 4) {
2600990e
RB
416 printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
417 " unaligned relocation\n");
e01402b1
RB
418 return -ENOEXEC;
419 }
420
307bd284
RB
421/*
422 * Not desperately convinced this is a good check of an overflow condition
423 * anyway. But it gets in the way of handling undefined weak symbols which
424 * we want to set to zero.
425 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
426 * printk(KERN_ERR
427 * "module %s: relocation overflow\n",
428 * me->name);
429 * return -ENOEXEC;
430 * }
431 */
e01402b1
RB
432
433 *location = (*location & ~0x03ffffff) |
434 ((*location + (v >> 2)) & 0x03ffffff);
435 return 0;
436}
437
438static int apply_r_mips_hi16(struct module *me, uint32_t *location,
439 Elf32_Addr v)
440{
441 struct mips_hi16 *n;
442
443 /*
444 * We cannot relocate this one now because we don't know the value of
445 * the carry we need to add. Save the information, and let LO16 do the
446 * actual relocation.
447 */
448 n = kmalloc(sizeof *n, GFP_KERNEL);
449 if (!n)
450 return -ENOMEM;
451
452 n->addr = location;
453 n->value = v;
454 n->next = mips_hi16_list;
455 mips_hi16_list = n;
456
457 return 0;
458}
459
460static int apply_r_mips_lo16(struct module *me, uint32_t *location,
461 Elf32_Addr v)
462{
463 unsigned long insnlo = *location;
464 Elf32_Addr val, vallo;
477c4b07 465 struct mips_hi16 *l, *next;
e01402b1
RB
466
467 /* Sign extend the addend we extract from the lo insn. */
468 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
469
470 if (mips_hi16_list != NULL) {
e01402b1
RB
471
472 l = mips_hi16_list;
473 while (l != NULL) {
e01402b1
RB
474 unsigned long insn;
475
476 /*
477 * The value for the HI16 had best be the same.
478 */
2600990e
RB
479 if (v != l->value) {
480 printk(KERN_DEBUG "VPE loader: "
b1e3afa0 481 "apply_r_mips_lo16/hi16: \t"
2600990e 482 "inconsistent value information\n");
477c4b07 483 goto out_free;
e01402b1
RB
484 }
485
e01402b1
RB
486 /*
487 * Do the HI16 relocation. Note that we actually don't
488 * need to know anything about the LO16 itself, except
489 * where to find the low 16 bits of the addend needed
490 * by the LO16.
491 */
492 insn = *l->addr;
493 val = ((insn & 0xffff) << 16) + vallo;
494 val += v;
495
496 /*
497 * Account for the sign extension that will happen in
498 * the low bits.
499 */
500 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
501
502 insn = (insn & ~0xffff) | val;
503 *l->addr = insn;
504
505 next = l->next;
506 kfree(l);
507 l = next;
508 }
509
510 mips_hi16_list = NULL;
511 }
512
513 /*
514 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
515 */
516 val = v + vallo;
517 insnlo = (insnlo & ~0xffff) | (val & 0xffff);
518 *location = insnlo;
519
520 return 0;
477c4b07
RB
521
522out_free:
523 while (l != NULL) {
524 next = l->next;
525 kfree(l);
526 l = next;
527 }
528 mips_hi16_list = NULL;
529
530 return -ENOEXEC;
e01402b1
RB
531}
532
533static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
534 Elf32_Addr v) = {
535 [R_MIPS_NONE] = apply_r_mips_none,
536 [R_MIPS_32] = apply_r_mips_32,
537 [R_MIPS_26] = apply_r_mips_26,
538 [R_MIPS_HI16] = apply_r_mips_hi16,
539 [R_MIPS_LO16] = apply_r_mips_lo16,
540 [R_MIPS_GPREL16] = apply_r_mips_gprel16,
541 [R_MIPS_PC16] = apply_r_mips_pc16
542};
543
2600990e 544static char *rstrs[] = {
e0daad44 545 [R_MIPS_NONE] = "MIPS_NONE",
2600990e
RB
546 [R_MIPS_32] = "MIPS_32",
547 [R_MIPS_26] = "MIPS_26",
548 [R_MIPS_HI16] = "MIPS_HI16",
549 [R_MIPS_LO16] = "MIPS_LO16",
550 [R_MIPS_GPREL16] = "MIPS_GPREL16",
551 [R_MIPS_PC16] = "MIPS_PC16"
552};
e01402b1
RB
553
554int apply_relocations(Elf32_Shdr *sechdrs,
555 const char *strtab,
556 unsigned int symindex,
557 unsigned int relsec,
558 struct module *me)
559{
560 Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
561 Elf32_Sym *sym;
562 uint32_t *location;
563 unsigned int i;
564 Elf32_Addr v;
565 int res;
566
567 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
568 Elf32_Word r_info = rel[i].r_info;
569
570 /* This is where to make the change */
571 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
572 + rel[i].r_offset;
573 /* This is the symbol it is referring to */
574 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
575 + ELF32_R_SYM(r_info);
576
577 if (!sym->st_value) {
578 printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
579 me->name, strtab + sym->st_name);
580 /* just print the warning, don't barf */
581 }
582
583 v = sym->st_value;
584
585 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
586 if( res ) {
2600990e
RB
587 char *r = rstrs[ELF32_R_TYPE(r_info)];
588 printk(KERN_WARNING "VPE loader: .text+0x%x "
589 "relocation type %s for symbol \"%s\" failed\n",
590 rel[i].r_offset, r ? r : "UNKNOWN",
591 strtab + sym->st_name);
e01402b1 592 return res;
2600990e 593 }
e01402b1
RB
594 }
595
596 return 0;
597}
598
599void save_gp_address(unsigned int secbase, unsigned int rel)
600{
601 gp_addr = secbase + rel;
602 gp_offs = gp_addr - (secbase & 0xffff0000);
603}
604/* end module-elf32.c */
605
606
607
608/* Change all symbols so that sh_value encodes the pointer directly. */
2600990e 609static void simplify_symbols(Elf_Shdr * sechdrs,
e01402b1
RB
610 unsigned int symindex,
611 const char *strtab,
612 const char *secstrings,
613 unsigned int nsecs, struct module *mod)
614{
615 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
616 unsigned long secbase, bssbase = 0;
617 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
2600990e 618 int size;
e01402b1
RB
619
620 /* find the .bss section for COMMON symbols */
621 for (i = 0; i < nsecs; i++) {
2600990e 622 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
e01402b1 623 bssbase = sechdrs[i].sh_addr;
2600990e
RB
624 break;
625 }
e01402b1
RB
626 }
627
628 for (i = 1; i < n; i++) {
629 switch (sym[i].st_shndx) {
630 case SHN_COMMON:
2600990e
RB
631 /* Allocate space for the symbol in the .bss section.
632 st_value is currently size.
e01402b1
RB
633 We want it to have the address of the symbol. */
634
635 size = sym[i].st_value;
636 sym[i].st_value = bssbase;
637
638 bssbase += size;
639 break;
640
641 case SHN_ABS:
642 /* Don't need to do anything */
643 break;
644
645 case SHN_UNDEF:
646 /* ret = -ENOENT; */
647 break;
648
649 case SHN_MIPS_SCOMMON:
b1e3afa0 650 printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
2600990e
RB
651 "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
652 sym[i].st_shndx);
e01402b1
RB
653 // .sbss section
654 break;
655
656 default:
657 secbase = sechdrs[sym[i].st_shndx].sh_addr;
658
659 if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
660 save_gp_address(secbase, sym[i].st_value);
661 }
662
663 sym[i].st_value += secbase;
664 break;
665 }
e01402b1 666 }
e01402b1
RB
667}
668
669#ifdef DEBUG_ELFLOADER
670static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
671 const char *strtab, struct module *mod)
672{
673 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
674 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
675
676 printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
677 for (i = 1; i < n; i++) {
678 printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
679 strtab + sym[i].st_name, sym[i].st_value);
680 }
681}
682#endif
683
e01402b1 684/* We are prepared so configure and start the VPE... */
be6e1437 685static int vpe_run(struct vpe * v)
e01402b1 686{
07cc0c9e 687 unsigned long flags, val, dmt_flag;
2600990e 688 struct vpe_notifications *n;
07cc0c9e 689 unsigned int vpeflags;
e01402b1
RB
690 struct tc *t;
691
692 /* check we are the Master VPE */
07cc0c9e 693 local_irq_save(flags);
e01402b1
RB
694 val = read_c0_vpeconf0();
695 if (!(val & VPECONF0_MVP)) {
696 printk(KERN_WARNING
2600990e 697 "VPE loader: only Master VPE's are allowed to configure MT\n");
07cc0c9e
RB
698 local_irq_restore(flags);
699
e01402b1
RB
700 return -1;
701 }
702
07cc0c9e
RB
703 dmt_flag = dmt();
704 vpeflags = dvpe();
e01402b1 705
2600990e 706 if (!list_empty(&v->tc)) {
e0daad44 707 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
07cc0c9e
RB
708 evpe(vpeflags);
709 emt(dmt_flag);
710 local_irq_restore(flags);
711
712 printk(KERN_WARNING
713 "VPE loader: TC %d is already in use.\n",
714 t->index);
e0daad44
RB
715 return -ENOEXEC;
716 }
717 } else {
07cc0c9e
RB
718 evpe(vpeflags);
719 emt(dmt_flag);
720 local_irq_restore(flags);
721
722 printk(KERN_WARNING
723 "VPE loader: No TC's associated with VPE %d\n",
e0daad44 724 v->minor);
07cc0c9e 725
e0daad44
RB
726 return -ENOEXEC;
727 }
2600990e 728
e01402b1 729 /* Put MVPE's into 'configuration state' */
340ee4b9 730 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 731
e01402b1
RB
732 settc(t->index);
733
e01402b1
RB
734 /* should check it is halted, and not activated */
735 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
07cc0c9e
RB
736 evpe(vpeflags);
737 emt(dmt_flag);
738 local_irq_restore(flags);
739
740 printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
e01402b1 741 t->index);
07cc0c9e 742
e01402b1
RB
743 return -ENOEXEC;
744 }
745
746 /* Write the address we want it to start running from in the TCPC register. */
747 write_tc_c0_tcrestart((unsigned long)v->__start);
e01402b1 748 write_tc_c0_tccontext((unsigned long)0);
07cc0c9e 749
2600990e
RB
750 /*
751 * Mark the TC as activated, not interrupt exempt and not dynamically
752 * allocatable
753 */
e01402b1
RB
754 val = read_tc_c0_tcstatus();
755 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
756 write_tc_c0_tcstatus(val);
757
758 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
759
e01402b1
RB
760 /*
761 * The sde-kit passes 'memsize' to __start in $a3, so set something
2600990e 762 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
e01402b1
RB
763 * DFLT_HEAP_SIZE when you compile your program
764 */
41790e04 765 mttgpr(6, v->ntcs);
07cc0c9e 766 mttgpr(7, physical_memsize);
2600990e
RB
767
768 /* set up VPE1 */
769 /*
770 * bind the TC to VPE 1 as late as possible so we only have the final
771 * VPE registers to set up, and so an EJTAG probe can trigger on it
772 */
07cc0c9e 773 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
e01402b1 774
a94d7020
EO
775 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
776
777 back_to_back_c0_hazard();
778
e0daad44
RB
779 /* Set up the XTC bit in vpeconf0 to point at our tc */
780 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
781 | (t->index << VPECONF0_XTC_SHIFT));
e01402b1 782
a94d7020
EO
783 back_to_back_c0_hazard();
784
e0daad44
RB
785 /* enable this VPE */
786 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
e01402b1
RB
787
788 /* clear out any left overs from a previous program */
2600990e 789 write_vpe_c0_status(0);
e01402b1
RB
790 write_vpe_c0_cause(0);
791
792 /* take system out of configuration state */
340ee4b9 793 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 794
b618336a
KK
795 /*
796 * SMTC/SMVP kernels manage VPE enable independently,
797 * but uniprocessor kernels need to turn it on, even
798 * if that wasn't the pre-dvpe() state.
799 */
07cc0c9e 800#ifdef CONFIG_SMP
07cc0c9e 801 evpe(vpeflags);
b618336a
KK
802#else
803 evpe(EVPE_ENABLE);
07cc0c9e
RB
804#endif
805 emt(dmt_flag);
806 local_irq_restore(flags);
e01402b1 807
07cc0c9e
RB
808 list_for_each_entry(n, &v->notify, list)
809 n->start(minor);
2600990e 810
e01402b1
RB
811 return 0;
812}
813
2600990e 814static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
e01402b1
RB
815 unsigned int symindex, const char *strtab,
816 struct module *mod)
817{
818 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
819 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
820
821 for (i = 1; i < n; i++) {
822 if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
823 v->__start = sym[i].st_value;
824 }
825
826 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
827 v->shared_ptr = (void *)sym[i].st_value;
828 }
829 }
830
2600990e
RB
831 if ( (v->__start == 0) || (v->shared_ptr == NULL))
832 return -1;
833
e01402b1
RB
834 return 0;
835}
836
307bd284 837/*
2600990e
RB
838 * Allocates a VPE with some program code space(the load address), copies the
839 * contents of the program (p)buffer performing relocations etc., frees it
840 * when finished.
841 */
be6e1437 842static int vpe_elfload(struct vpe * v)
e01402b1
RB
843{
844 Elf_Ehdr *hdr;
845 Elf_Shdr *sechdrs;
846 long err = 0;
847 char *secstrings, *strtab = NULL;
2600990e 848 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
e01402b1
RB
849 struct module mod; // so we can re-use the relocations code
850
851 memset(&mod, 0, sizeof(struct module));
2600990e 852 strcpy(mod.name, "VPE loader");
e01402b1
RB
853
854 hdr = (Elf_Ehdr *) v->pbuffer;
855 len = v->plen;
856
857 /* Sanity checks against insmoding binaries or wrong arch,
858 weird elf version */
d303f4a1 859 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
2600990e
RB
860 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
861 || !elf_check_arch(hdr)
e01402b1
RB
862 || hdr->e_shentsize != sizeof(*sechdrs)) {
863 printk(KERN_WARNING
2600990e 864 "VPE loader: program wrong arch or weird elf version\n");
e01402b1
RB
865
866 return -ENOEXEC;
867 }
868
2600990e
RB
869 if (hdr->e_type == ET_REL)
870 relocate = 1;
871
e01402b1 872 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
2600990e
RB
873 printk(KERN_ERR "VPE loader: program length %u truncated\n",
874 len);
875
e01402b1
RB
876 return -ENOEXEC;
877 }
878
879 /* Convenience variables */
880 sechdrs = (void *)hdr + hdr->e_shoff;
881 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
882 sechdrs[0].sh_addr = 0;
883
884 /* And these should exist, but gcc whinges if we don't init them */
885 symindex = strindex = 0;
886
2600990e
RB
887 if (relocate) {
888 for (i = 1; i < hdr->e_shnum; i++) {
889 if (sechdrs[i].sh_type != SHT_NOBITS
890 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
891 printk(KERN_ERR "VPE program length %u truncated\n",
892 len);
893 return -ENOEXEC;
894 }
e01402b1 895
2600990e
RB
896 /* Mark all sections sh_addr with their address in the
897 temporary image. */
898 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
e01402b1 899
2600990e
RB
900 /* Internal symbols and strings. */
901 if (sechdrs[i].sh_type == SHT_SYMTAB) {
902 symindex = i;
903 strindex = sechdrs[i].sh_link;
904 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
905 }
e01402b1 906 }
2600990e 907 layout_sections(&mod, hdr, sechdrs, secstrings);
e01402b1
RB
908 }
909
e01402b1 910 v->load_addr = alloc_progmem(mod.core_size);
5408c490
RB
911 if (!v->load_addr)
912 return -ENOMEM;
e01402b1 913
5408c490 914 pr_info("VPE loader: loading to %p\n", v->load_addr);
e01402b1 915
2600990e
RB
916 if (relocate) {
917 for (i = 0; i < hdr->e_shnum; i++) {
918 void *dest;
e01402b1 919
2600990e
RB
920 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
921 continue;
e01402b1 922
2600990e 923 dest = v->load_addr + sechdrs[i].sh_entsize;
e01402b1 924
2600990e
RB
925 if (sechdrs[i].sh_type != SHT_NOBITS)
926 memcpy(dest, (void *)sechdrs[i].sh_addr,
927 sechdrs[i].sh_size);
928 /* Update sh_addr to point to copy in image. */
929 sechdrs[i].sh_addr = (unsigned long)dest;
e01402b1 930
2600990e
RB
931 printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
932 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
933 }
e01402b1 934
2600990e
RB
935 /* Fix up syms, so that st_value is a pointer to location. */
936 simplify_symbols(sechdrs, symindex, strtab, secstrings,
937 hdr->e_shnum, &mod);
938
939 /* Now do relocations. */
940 for (i = 1; i < hdr->e_shnum; i++) {
941 const char *strtab = (char *)sechdrs[strindex].sh_addr;
942 unsigned int info = sechdrs[i].sh_info;
943
944 /* Not a valid relocation section? */
945 if (info >= hdr->e_shnum)
946 continue;
947
948 /* Don't bother with non-allocated sections */
949 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
950 continue;
951
952 if (sechdrs[i].sh_type == SHT_REL)
953 err = apply_relocations(sechdrs, strtab, symindex, i,
954 &mod);
955 else if (sechdrs[i].sh_type == SHT_RELA)
956 err = apply_relocate_add(sechdrs, strtab, symindex, i,
957 &mod);
958 if (err < 0)
959 return err;
960
961 }
962 } else {
bdf5d42c 963 struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
2600990e 964
bdf5d42c 965 for (i = 0; i < hdr->e_phnum; i++) {
b618336a
KK
966 if (phdr->p_type == PT_LOAD) {
967 memcpy((void *)phdr->p_paddr,
968 (char *)hdr + phdr->p_offset,
969 phdr->p_filesz);
970 memset((void *)phdr->p_paddr + phdr->p_filesz,
971 0, phdr->p_memsz - phdr->p_filesz);
972 }
973 phdr++;
bdf5d42c
RB
974 }
975
976 for (i = 0; i < hdr->e_shnum; i++) {
2600990e
RB
977 /* Internal symbols and strings. */
978 if (sechdrs[i].sh_type == SHT_SYMTAB) {
979 symindex = i;
980 strindex = sechdrs[i].sh_link;
981 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
982
983 /* mark the symtab's address for when we try to find the
984 magic symbols */
985 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
986 }
e01402b1
RB
987 }
988 }
989
990 /* make sure it's physically written out */
991 flush_icache_range((unsigned long)v->load_addr,
992 (unsigned long)v->load_addr + v->len);
993
994 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
2600990e
RB
995 if (v->__start == 0) {
996 printk(KERN_WARNING "VPE loader: program does not contain "
997 "a __start symbol\n");
998 return -ENOEXEC;
999 }
e01402b1 1000
2600990e
RB
1001 if (v->shared_ptr == NULL)
1002 printk(KERN_WARNING "VPE loader: "
1003 "program does not contain vpe_shared symbol.\n"
1004 " Unable to use AMVP (AP/SP) facilities.\n");
e01402b1
RB
1005 }
1006
1007 printk(" elf loaded\n");
2600990e 1008 return 0;
e01402b1
RB
1009}
1010
2600990e
RB
1011static void cleanup_tc(struct tc *tc)
1012{
07cc0c9e
RB
1013 unsigned long flags;
1014 unsigned int mtflags, vpflags;
2600990e
RB
1015 int tmp;
1016
07cc0c9e
RB
1017 local_irq_save(flags);
1018 mtflags = dmt();
1019 vpflags = dvpe();
2600990e
RB
1020 /* Put MVPE's into 'configuration state' */
1021 set_c0_mvpcontrol(MVPCONTROL_VPC);
1022
1023 settc(tc->index);
1024 tmp = read_tc_c0_tcstatus();
1025
1026 /* mark not allocated and not dynamically allocatable */
1027 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1028 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1029 write_tc_c0_tcstatus(tmp);
1030
1031 write_tc_c0_tchalt(TCHALT_H);
7c3a622d 1032 mips_ihb();
2600990e
RB
1033
1034 /* bind it to anything other than VPE1 */
07cc0c9e 1035// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
2600990e
RB
1036
1037 clear_c0_mvpcontrol(MVPCONTROL_VPC);
07cc0c9e
RB
1038 evpe(vpflags);
1039 emt(mtflags);
1040 local_irq_restore(flags);
2600990e
RB
1041}
1042
1043static int getcwd(char *buff, int size)
1044{
1045 mm_segment_t old_fs;
1046 int ret;
1047
1048 old_fs = get_fs();
1049 set_fs(KERNEL_DS);
1050
21a151d8 1051 ret = sys_getcwd(buff, size);
2600990e
RB
1052
1053 set_fs(old_fs);
1054
1055 return ret;
1056}
1057
1058/* checks VPE is unused and gets ready to load program */
e01402b1
RB
1059static int vpe_open(struct inode *inode, struct file *filp)
1060{
c4c4018b 1061 enum vpe_state state;
2600990e 1062 struct vpe_notifications *not;
07cc0c9e 1063 struct vpe *v;
7558da94 1064 int ret, err = 0;
e01402b1 1065
7558da94 1066 lock_kernel();
07cc0c9e
RB
1067 if (minor != iminor(inode)) {
1068 /* assume only 1 device at the moment. */
2600990e 1069 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
7558da94
JC
1070 err = -ENODEV;
1071 goto out;
e01402b1
RB
1072 }
1073
07cc0c9e 1074 if ((v = get_vpe(tclimit)) == NULL) {
2600990e 1075 printk(KERN_WARNING "VPE loader: unable to get vpe\n");
7558da94
JC
1076 err = -ENODEV;
1077 goto out;
e01402b1
RB
1078 }
1079
c4c4018b
RB
1080 state = xchg(&v->state, VPE_STATE_INUSE);
1081 if (state != VPE_STATE_UNUSED) {
2600990e 1082 printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
e01402b1 1083
2600990e 1084 list_for_each_entry(not, &v->notify, list) {
07cc0c9e 1085 not->stop(tclimit);
2600990e 1086 }
e01402b1 1087
2600990e 1088 release_progmem(v->load_addr);
07cc0c9e 1089 cleanup_tc(get_tc(tclimit));
e01402b1
RB
1090 }
1091
e01402b1
RB
1092 /* this of course trashes what was there before... */
1093 v->pbuffer = vmalloc(P_SIZE);
1094 v->plen = P_SIZE;
1095 v->load_addr = NULL;
1096 v->len = 0;
1097
d76b0d9b
DH
1098 v->uid = filp->f_cred->fsuid;
1099 v->gid = filp->f_cred->fsgid;
2600990e
RB
1100
1101#ifdef CONFIG_MIPS_APSP_KSPD
1102 /* get kspd to tell us when a syscall_exit happens */
1103 if (!kspd_events_reqd) {
1104 kspd_notify(&kspd_events);
1105 kspd_events_reqd++;
1106 }
1107#endif
1108
1109 v->cwd[0] = 0;
1110 ret = getcwd(v->cwd, VPE_PATH_MAX);
1111 if (ret < 0)
1112 printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
1113
1114 v->shared_ptr = NULL;
1115 v->__start = 0;
07cc0c9e 1116
7558da94
JC
1117out:
1118 unlock_kernel();
e01402b1
RB
1119 return 0;
1120}
1121
1122static int vpe_release(struct inode *inode, struct file *filp)
1123{
307bd284 1124 struct vpe *v;
e01402b1 1125 Elf_Ehdr *hdr;
07cc0c9e 1126 int ret = 0;
e01402b1 1127
07cc0c9e
RB
1128 v = get_vpe(tclimit);
1129 if (v == NULL)
e01402b1
RB
1130 return -ENODEV;
1131
e01402b1 1132 hdr = (Elf_Ehdr *) v->pbuffer;
d303f4a1 1133 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
07cc0c9e 1134 if (vpe_elfload(v) >= 0) {
e01402b1 1135 vpe_run(v);
07cc0c9e 1136 } else {
2600990e 1137 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
e01402b1
RB
1138 ret = -ENOEXEC;
1139 }
1140 } else {
2600990e 1141 printk(KERN_WARNING "VPE loader: only elf files are supported\n");
e01402b1
RB
1142 ret = -ENOEXEC;
1143 }
1144
2600990e
RB
1145 /* It's good to be able to run the SP and if it chokes have a look at
1146 the /dev/rt?. But if we reset the pointer to the shared struct we
8ebcfc8b 1147 lose what has happened. So perhaps if garbage is sent to the vpe
2600990e
RB
1148 device, use it as a trigger for the reset. Hopefully a nice
1149 executable will be along shortly. */
1150 if (ret < 0)
1151 v->shared_ptr = NULL;
1152
e01402b1
RB
1153 // cleanup any temp buffers
1154 if (v->pbuffer)
1155 vfree(v->pbuffer);
1156 v->plen = 0;
1157 return ret;
1158}
1159
1160static ssize_t vpe_write(struct file *file, const char __user * buffer,
1161 size_t count, loff_t * ppos)
1162{
e01402b1 1163 size_t ret = count;
307bd284 1164 struct vpe *v;
e01402b1 1165
07cc0c9e
RB
1166 if (iminor(file->f_path.dentry->d_inode) != minor)
1167 return -ENODEV;
1168
1169 v = get_vpe(tclimit);
1170 if (v == NULL)
e01402b1
RB
1171 return -ENODEV;
1172
1173 if (v->pbuffer == NULL) {
2600990e 1174 printk(KERN_ERR "VPE loader: no buffer for program\n");
e01402b1
RB
1175 return -ENOMEM;
1176 }
1177
1178 if ((count + v->len) > v->plen) {
1179 printk(KERN_WARNING
2600990e 1180 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
e01402b1
RB
1181 return -ENOMEM;
1182 }
1183
1184 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
2600990e 1185 if (!count)
e01402b1 1186 return -EFAULT;
e01402b1
RB
1187
1188 v->len += count;
1189 return ret;
1190}
1191
5dfe4c96 1192static const struct file_operations vpe_fops = {
e01402b1
RB
1193 .owner = THIS_MODULE,
1194 .open = vpe_open,
1195 .release = vpe_release,
1196 .write = vpe_write
1197};
1198
1199/* module wrapper entry points */
1200/* give me a vpe */
1201vpe_handle vpe_alloc(void)
1202{
1203 int i;
1204 struct vpe *v;
1205
1206 /* find a vpe */
1207 for (i = 1; i < MAX_VPES; i++) {
1208 if ((v = get_vpe(i)) != NULL) {
1209 v->state = VPE_STATE_INUSE;
1210 return v;
1211 }
1212 }
1213 return NULL;
1214}
1215
1216EXPORT_SYMBOL(vpe_alloc);
1217
1218/* start running from here */
1219int vpe_start(vpe_handle vpe, unsigned long start)
1220{
1221 struct vpe *v = vpe;
1222
1223 v->__start = start;
1224 return vpe_run(v);
1225}
1226
1227EXPORT_SYMBOL(vpe_start);
1228
1229/* halt it for now */
1230int vpe_stop(vpe_handle vpe)
1231{
1232 struct vpe *v = vpe;
1233 struct tc *t;
1234 unsigned int evpe_flags;
1235
1236 evpe_flags = dvpe();
1237
1238 if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
1239
1240 settc(t->index);
1241 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1242 }
1243
1244 evpe(evpe_flags);
1245
1246 return 0;
1247}
1248
1249EXPORT_SYMBOL(vpe_stop);
1250
1251/* I've done with it thank you */
1252int vpe_free(vpe_handle vpe)
1253{
1254 struct vpe *v = vpe;
1255 struct tc *t;
1256 unsigned int evpe_flags;
1257
1258 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
1259 return -ENOEXEC;
1260 }
1261
1262 evpe_flags = dvpe();
1263
1264 /* Put MVPE's into 'configuration state' */
340ee4b9 1265 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1266
1267 settc(t->index);
1268 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1269
7c3a622d 1270 /* halt the TC */
e01402b1 1271 write_tc_c0_tchalt(TCHALT_H);
7c3a622d
NS
1272 mips_ihb();
1273
1274 /* mark the TC unallocated */
1275 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
e01402b1
RB
1276
1277 v->state = VPE_STATE_UNUSED;
1278
340ee4b9 1279 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1280 evpe(evpe_flags);
1281
1282 return 0;
1283}
1284
1285EXPORT_SYMBOL(vpe_free);
1286
1287void *vpe_get_shared(int index)
1288{
1289 struct vpe *v;
1290
2600990e 1291 if ((v = get_vpe(index)) == NULL)
e01402b1 1292 return NULL;
e01402b1
RB
1293
1294 return v->shared_ptr;
1295}
1296
1297EXPORT_SYMBOL(vpe_get_shared);
1298
2600990e
RB
1299int vpe_getuid(int index)
1300{
1301 struct vpe *v;
1302
1303 if ((v = get_vpe(index)) == NULL)
1304 return -1;
1305
1306 return v->uid;
1307}
1308
1309EXPORT_SYMBOL(vpe_getuid);
1310
1311int vpe_getgid(int index)
1312{
1313 struct vpe *v;
1314
1315 if ((v = get_vpe(index)) == NULL)
1316 return -1;
1317
1318 return v->gid;
1319}
1320
1321EXPORT_SYMBOL(vpe_getgid);
1322
1323int vpe_notify(int index, struct vpe_notifications *notify)
1324{
1325 struct vpe *v;
1326
1327 if ((v = get_vpe(index)) == NULL)
1328 return -1;
1329
1330 list_add(&notify->list, &v->notify);
1331 return 0;
1332}
1333
1334EXPORT_SYMBOL(vpe_notify);
1335
1336char *vpe_getcwd(int index)
1337{
1338 struct vpe *v;
1339
1340 if ((v = get_vpe(index)) == NULL)
1341 return NULL;
1342
1343 return v->cwd;
1344}
1345
1346EXPORT_SYMBOL(vpe_getcwd);
1347
#ifdef CONFIG_MIPS_APSP_KSPD
/* KSPD callback: the SP program on TC 'sp_id' exited; reset that TC. */
static void kspd_sp_exit( int sp_id)
{
	cleanup_tc(get_tc(sp_id));
}
#endif
1354
736fad17
KS
1355static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1356 const char *buf, size_t len)
0f5d0df3
RB
1357{
1358 struct vpe *vpe = get_vpe(tclimit);
1359 struct vpe_notifications *not;
1360
1361 list_for_each_entry(not, &vpe->notify, list) {
1362 not->stop(tclimit);
1363 }
1364
1365 release_progmem(vpe->load_addr);
1366 cleanup_tc(get_tc(tclimit));
1367 vpe_stop(vpe);
1368 vpe_free(vpe);
1369
1370 return len;
1371}
1372
736fad17
KS
1373static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
1374 char *buf)
41790e04
RB
1375{
1376 struct vpe *vpe = get_vpe(tclimit);
1377
1378 return sprintf(buf, "%d\n", vpe->ntcs);
1379}
1380
736fad17
KS
1381static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
1382 const char *buf, size_t len)
41790e04
RB
1383{
1384 struct vpe *vpe = get_vpe(tclimit);
1385 unsigned long new;
1386 char *endp;
1387
1388 new = simple_strtoul(buf, &endp, 0);
1389 if (endp == buf)
1390 goto out_einval;
1391
1392 if (new == 0 || new > (hw_tcs - tclimit))
1393 goto out_einval;
1394
1395 vpe->ntcs = new;
1396
1397 return len;
1398
1399out_einval:
52a7a27c 1400 return -EINVAL;
41790e04
RB
1401}
1402
736fad17 1403static struct device_attribute vpe_class_attributes[] = {
0f5d0df3 1404 __ATTR(kill, S_IWUSR, NULL, store_kill),
41790e04
RB
1405 __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
1406 {}
1407};
1408
736fad17 1409static void vpe_device_release(struct device *cd)
41790e04
RB
1410{
1411 kfree(cd);
1412}
1413
/*
 * sysfs class for the VPE loader; exposes /sys/class/vpe with the
 * "kill" and "ntcs" attributes defined above.
 */
struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_attrs = vpe_class_attributes,
};
1420
736fad17 1421struct device vpe_device;
27a3bbaf 1422
e01402b1
RB
/*
 * Module init: register the chardev, sysfs class and device, then walk
 * every TC from tclimit upward under a disabled-MT/VPE configuration
 * window, allocating per-TC and per-VPE bookkeeping and parking the
 * hardware (VPEs deactivated, TCs halted and marked unallocatable).
 * Returns 0 on success or a negative errno.
 */
static int __init vpe_module_init(void)
{
	unsigned int mtflags, vpflags;
	unsigned long flags, val;
	struct vpe *v = NULL;
	struct tc *t;
	int tc, err;

	if (!cpu_has_mipsmt) {
		printk("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (vpelimit == 0) {
		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
		       "kernel argument\n");

		return -ENODEV;
	}

	if (tclimit == 0) {
		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
		       "kernel argument\n");

		return -ENODEV;
	}

	major = register_chrdev(0, module_name, &vpe_fops);
	if (major < 0) {
		printk("VPE loader: unable to register character device\n");
		return major;
	}

	err = class_register(&vpe_class);
	if (err) {
		printk(KERN_ERR "vpe_class registration failed\n");
		goto out_chrdev;
	}

	device_initialize(&vpe_device);
	vpe_device.class	= &vpe_class,
	vpe_device.parent	= NULL,
	dev_set_name(&vpe_device, "vpe1");
	vpe_device.devt = MKDEV(major, minor);
	err = device_add(&vpe_device);
	if (err) {
		printk(KERN_ERR "Adding vpe_device failed\n");
		goto out_class;
	}

	/* quiesce the whole MT system while we poke VPE/TC state */
	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	/* dump_mtregs(); */

	/* discover how many TCs and VPEs the hardware actually has */
	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	for (tc = tclimit; tc < hw_tcs; tc++) {
		/*
		 * Must re-enable multithreading temporarily or in case we
		 * reschedule send IPIs or similar we might hang.
		 */
		clear_c0_mvpcontrol(MVPCONTROL_VPC);
		evpe(vpflags);
		emt(mtflags);
		local_irq_restore(flags);
		t = alloc_tc(tc);
		if (!t) {
			/*
			 * NOTE(review): this path returns err without
			 * undoing device_add/class_register/register_chrdev
			 * — looks like a leak; confirm and route through the
			 * unwind labels instead.
			 */
			err = -ENOMEM;
			goto out;
		}

		local_irq_save(flags);
		mtflags = dmt();
		vpflags = dvpe();
		set_c0_mvpcontrol(MVPCONTROL_VPC);

		/* VPE's */
		/* NOTE(review): tc < hw_tcs is the loop bound, so this
		   condition is always true here. */
		if (tc < hw_tcs) {
			settc(tc);

			if ((v = alloc_vpe(tc)) == NULL) {
				printk(KERN_WARNING "VPE: unable to allocate VPE\n");

				goto out_reenable;
			}

			v->ntcs = hw_tcs - tclimit;

			/* add the tc to the list of this vpe's tc's. */
			list_add(&t->tc, &v->tc);

			/* deactivate all but vpe0 */
			if (tc >= tclimit) {
				unsigned long tmp = read_vpe_c0_vpeconf0();

				tmp &= ~VPECONF0_VPA;

				/* master VPE */
				tmp |= VPECONF0_MVP;
				write_vpe_c0_vpeconf0(tmp);
			}

			/* disable multi-threading with TC's */
			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);

			if (tc >= vpelimit) {
				/*
				 * Set config to be the same as vpe0,
				 * particularly kseg0 coherency alg
				 */
				write_vpe_c0_config(read_c0_config());
			}
		}

		/* TC's */
		t->pvpe = v;	/* set the parent vpe */

		if (tc >= tclimit) {
			unsigned long tmp;

			settc(tc);

			/* Any TC that is bound to VPE0 gets left as is - in case
			   we are running SMTC on VPE0. A TC that is bound to any
			   other VPE gets bound to VPE0, ideally I'd like to make
			   it homeless but it doesn't appear to let me bind a TC
			   to a non-existent VPE. Which is perfectly reasonable.

			   The (un)bound state is visible to an EJTAG probe so may
			   notify GDB...
			*/

			if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
				/* tc is bound >vpe0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent vpe */
			}

			/* halt the TC */
			write_tc_c0_tchalt(TCHALT_H);
			mips_ihb();

			tmp = read_tc_c0_tcstatus();

			/* mark not activated and not dynamically allocatable */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
			write_tc_c0_tcstatus(tmp);
		}
	}

out_reenable:
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);

#ifdef CONFIG_MIPS_APSP_KSPD
	kspd_events.kspd_sp_exit = kspd_sp_exit;
#endif
	return 0;

out_class:
	class_unregister(&vpe_class);
out_chrdev:
	unregister_chrdev(major, module_name);

out:
	return err;
}
1605
1606static void __exit vpe_module_exit(void)
1607{
1608 struct vpe *v, *n;
1609
1610 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1611 if (v->state != VPE_STATE_UNUSED) {
1612 release_vpe(v);
1613 }
1614 }
1615
736fad17 1616 device_del(&vpe_device);
e01402b1
RB
1617 unregister_chrdev(major, module_name);
1618}
1619
1620module_init(vpe_module_init);
1621module_exit(vpe_module_exit);
1622MODULE_DESCRIPTION("MIPS VPE Loader");
2600990e 1623MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
e01402b1 1624MODULE_LICENSE("GPL");