arch/ia64/kernel/unwind.c
1/*
2 * Copyright (C) 1999-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
 5 * - Change pt_regs_off() to make it less dependent on pt_regs structure.
6 */
7/*
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 *
20 * SMP conventions:
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
28 */
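/*
 * Rough control flow (a reader's sketch, based on the code below, not a
 * normative description): unw_unwind() validates the current frame and then
 * calls find_save_locs(), which tries script_lookup() for a cached script and
 * falls back to build_script() (which parses the unwind descriptors via
 * unwind_decoder.c); the resulting script is interpreted by run_script() to
 * recompute the save locations recorded in the unw_frame_info.
 */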
29#include <linux/module.h>
30#include <linux/bootmem.h>
31#include <linux/elf.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35
36#include <asm/unwind.h>
37
38#include <asm/delay.h>
39#include <asm/page.h>
40#include <asm/ptrace.h>
41#include <asm/ptrace_offsets.h>
42#include <asm/rse.h>
43#include <asm/sections.h>
44#include <asm/system.h>
45#include <asm/uaccess.h>
46
47#include "entry.h"
48#include "unwind_i.h"
49
50#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
51#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
52
53#define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
54#define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
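/*
 * Sizing note (a sketch from the constants above): with UNW_LOG_CACHE_SIZE == 7
 * the cache holds 1 << 7 == 128 scripts (roughly 128 * 256 bytes == 32KB), and
 * the hash table has twice as many buckets (256), which keeps the collision
 * chains walked by script_lookup() short.
 */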
55
 56#define UNW_STATS 0 /* WARNING: this disables interrupts for long time-spans!! */
57
58#ifdef UNW_DEBUG
59 static unsigned int unw_debug_level = UNW_DEBUG;
60# define UNW_DEBUG_ON(n) unw_debug_level >= n
61 /* Do not code a printk level, not all debug lines end in newline */
62# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
 63# undef inline
64# define inline
65#else /* !UNW_DEBUG */
66# define UNW_DEBUG_ON(n) 0
67# define UNW_DPRINT(n, ...)
68#endif /* UNW_DEBUG */
69
70#if UNW_STATS
71# define STAT(x...) x
72#else
73# define STAT(x...)
74#endif
75
76#define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
77#define free_reg_state(usr) kfree(usr)
78#define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
79#define free_labeled_state(usr) kfree(usr)
80
81typedef unsigned long unw_word;
82typedef unsigned char unw_hash_index_t;
83
84static struct {
85 spinlock_t lock; /* spinlock for unwind data */
86
87 /* list of unwind tables (one per load-module) */
88 struct unw_table *tables;
89
90 unsigned long r0; /* constant 0 for r0 */
91
92 /* table of registers that prologues can save (and order in which they're saved): */
93 const unsigned char save_order[8];
94
95 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
96 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
97
 98 unsigned short lru_head; /* index of least-recently used script */
99 unsigned short lru_tail; /* index of most-recently used script */
100
101 /* index into unw_frame_info for preserved register i */
102 unsigned short preg_index[UNW_NUM_REGS];
103
104 short pt_regs_offsets[32];
105
106 /* unwind table for the kernel: */
107 struct unw_table kernel_table;
108
109 /* unwind table describing the gate page (kernel code that is mapped into user space): */
110 size_t gate_table_size;
111 unsigned long *gate_table;
112
113 /* hash table that maps instruction pointer to script index: */
114 unsigned short hash[UNW_HASH_SIZE];
115
116 /* script cache: */
117 struct unw_script cache[UNW_CACHE_SIZE];
118
119# ifdef UNW_DEBUG
120 const char *preg_name[UNW_NUM_REGS];
121# endif
122# if UNW_STATS
123 struct {
124 struct {
125 int lookups;
126 int hinted_hits;
127 int normal_hits;
128 int collision_chain_traversals;
129 } cache;
130 struct {
131 unsigned long build_time;
132 unsigned long run_time;
133 unsigned long parse_time;
134 int builds;
135 int news;
136 int collisions;
137 int runs;
138 } script;
139 struct {
140 unsigned long init_time;
141 unsigned long unwind_time;
142 int inits;
143 int unwinds;
144 } api;
145 } stat;
146# endif
147} unw = {
148 .tables = &unw.kernel_table,
 149 .lock = __SPIN_LOCK_UNLOCKED(unw.lock),
150 .save_order = {
151 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
152 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
153 },
154 .preg_index = {
155 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
156 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
157 offsetof(struct unw_frame_info, bsp_loc)/8,
158 offsetof(struct unw_frame_info, bspstore_loc)/8,
159 offsetof(struct unw_frame_info, pfs_loc)/8,
160 offsetof(struct unw_frame_info, rnat_loc)/8,
161 offsetof(struct unw_frame_info, psp)/8,
162 offsetof(struct unw_frame_info, rp_loc)/8,
163 offsetof(struct unw_frame_info, r4)/8,
164 offsetof(struct unw_frame_info, r5)/8,
165 offsetof(struct unw_frame_info, r6)/8,
166 offsetof(struct unw_frame_info, r7)/8,
167 offsetof(struct unw_frame_info, unat_loc)/8,
168 offsetof(struct unw_frame_info, pr_loc)/8,
169 offsetof(struct unw_frame_info, lc_loc)/8,
170 offsetof(struct unw_frame_info, fpsr_loc)/8,
171 offsetof(struct unw_frame_info, b1_loc)/8,
172 offsetof(struct unw_frame_info, b2_loc)/8,
173 offsetof(struct unw_frame_info, b3_loc)/8,
174 offsetof(struct unw_frame_info, b4_loc)/8,
175 offsetof(struct unw_frame_info, b5_loc)/8,
176 offsetof(struct unw_frame_info, f2_loc)/8,
177 offsetof(struct unw_frame_info, f3_loc)/8,
178 offsetof(struct unw_frame_info, f4_loc)/8,
179 offsetof(struct unw_frame_info, f5_loc)/8,
180 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
181 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
182 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
183 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
184 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
185 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
186 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
187 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
188 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
189 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
190 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
191 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
192 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
193 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
194 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
195 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
196 },
197 .pt_regs_offsets = {
198 [0] = -1,
199 offsetof(struct pt_regs, r1),
200 offsetof(struct pt_regs, r2),
201 offsetof(struct pt_regs, r3),
202 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
203 offsetof(struct pt_regs, r8),
204 offsetof(struct pt_regs, r9),
205 offsetof(struct pt_regs, r10),
206 offsetof(struct pt_regs, r11),
207 offsetof(struct pt_regs, r12),
208 offsetof(struct pt_regs, r13),
209 offsetof(struct pt_regs, r14),
210 offsetof(struct pt_regs, r15),
211 offsetof(struct pt_regs, r16),
212 offsetof(struct pt_regs, r17),
213 offsetof(struct pt_regs, r18),
214 offsetof(struct pt_regs, r19),
215 offsetof(struct pt_regs, r20),
216 offsetof(struct pt_regs, r21),
217 offsetof(struct pt_regs, r22),
218 offsetof(struct pt_regs, r23),
219 offsetof(struct pt_regs, r24),
220 offsetof(struct pt_regs, r25),
221 offsetof(struct pt_regs, r26),
222 offsetof(struct pt_regs, r27),
223 offsetof(struct pt_regs, r28),
224 offsetof(struct pt_regs, r29),
225 offsetof(struct pt_regs, r30),
226 offsetof(struct pt_regs, r31),
227 },
228 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
229#ifdef UNW_DEBUG
230 .preg_name = {
231 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
232 "r4", "r5", "r6", "r7",
233 "ar.unat", "pr", "ar.lc", "ar.fpsr",
234 "b1", "b2", "b3", "b4", "b5",
235 "f2", "f3", "f4", "f5",
236 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
237 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
238 }
239#endif
240};
241
242static inline int
243read_only (void *addr)
244{
245 return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
246}
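/*
 * A note on the comparison above (as I read it): the unsigned subtraction is a
 * compact range check. Only addr == &unw.r0 yields a difference in
 * [0, sizeof(unw.r0)); any address below &unw.r0 wraps around to a huge
 * unsigned value, so a single compare covers both bounds. unw.r0 is the dummy
 * location backing the constant register r0; callers that check read_only()
 * silently drop writes to it.
 */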
247
248/*
249 * Returns offset of rREG in struct pt_regs.
250 */
251static inline unsigned long
252pt_regs_off (unsigned long reg)
253{
254 short off = -1;
255
256 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
257 off = unw.pt_regs_offsets[reg];
258
259 if (off < 0) {
260 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
261 off = 0;
262 }
263 return (unsigned long) off;
264}
265
266static inline struct pt_regs *
267get_scratch_regs (struct unw_frame_info *info)
268{
269 if (!info->pt) {
270 /* This should not happen with valid unwind info. */
271 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
272 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
273 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
274 else
275 info->pt = info->sp - 16;
276 }
277 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
278 return (struct pt_regs *) info->pt;
279}
280
281/* Unwind accessors. */
282
283int
284unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
285{
286 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
287 struct unw_ireg *ireg;
288 struct pt_regs *pt;
289
290 if ((unsigned) regnum - 1 >= 127) {
291 if (regnum == 0 && !write) {
292 *val = 0; /* read r0 always returns 0 */
293 *nat = 0;
294 return 0;
295 }
296 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
297 __FUNCTION__, regnum);
298 return -1;
299 }
300
301 if (regnum < 32) {
302 if (regnum >= 4 && regnum <= 7) {
303 /* access a preserved register */
304 ireg = &info->r4 + (regnum - 4);
305 addr = ireg->loc;
306 if (addr) {
307 nat_addr = addr + ireg->nat.off;
308 switch (ireg->nat.type) {
309 case UNW_NAT_VAL:
310 /* simulate getf.sig/setf.sig */
311 if (write) {
312 if (*nat) {
313 /* write NaTVal and be done with it */
314 addr[0] = 0;
315 addr[1] = 0x1fffe;
316 return 0;
317 }
318 addr[1] = 0x1003e;
319 } else {
320 if (addr[0] == 0 && addr[1] == 0x1ffe) {
321 /* return NaT and be done with it */
322 *val = 0;
323 *nat = 1;
324 return 0;
325 }
326 }
327 /* fall through */
328 case UNW_NAT_NONE:
329 dummy_nat = 0;
330 nat_addr = &dummy_nat;
331 break;
332
333 case UNW_NAT_MEMSTK:
334 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
335 break;
336
337 case UNW_NAT_REGSTK:
338 nat_addr = ia64_rse_rnat_addr(addr);
339 if ((unsigned long) addr < info->regstk.limit
340 || (unsigned long) addr >= info->regstk.top)
341 {
342 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
343 "[0x%lx-0x%lx)\n",
344 __FUNCTION__, (void *) addr,
345 info->regstk.limit,
346 info->regstk.top);
347 return -1;
348 }
349 if ((unsigned long) nat_addr >= info->regstk.top)
350 nat_addr = &info->sw->ar_rnat;
351 nat_mask = (1UL << ia64_rse_slot_num(addr));
352 break;
353 }
354 } else {
355 addr = &info->sw->r4 + (regnum - 4);
356 nat_addr = &info->sw->ar_unat;
357 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
358 }
359 } else {
360 /* access a scratch register */
361 pt = get_scratch_regs(info);
362 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
363 if (info->pri_unat_loc)
364 nat_addr = info->pri_unat_loc;
365 else
 366 nat_addr = &info->sw->caller_unat;
367 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
368 }
369 } else {
370 /* access a stacked register */
371 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
372 nat_addr = ia64_rse_rnat_addr(addr);
373 if ((unsigned long) addr < info->regstk.limit
374 || (unsigned long) addr >= info->regstk.top)
375 {
376 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
377 "of rbs\n", __FUNCTION__);
378 return -1;
379 }
380 if ((unsigned long) nat_addr >= info->regstk.top)
381 nat_addr = &info->sw->ar_rnat;
382 nat_mask = (1UL << ia64_rse_slot_num(addr));
383 }
384
385 if (write) {
386 if (read_only(addr)) {
387 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
388 __FUNCTION__);
389 } else {
390 *addr = *val;
391 if (*nat)
392 *nat_addr |= nat_mask;
393 else
394 *nat_addr &= ~nat_mask;
395 }
396 } else {
397 if ((*nat_addr & nat_mask) == 0) {
398 *val = *addr;
399 *nat = 0;
400 } else {
401 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
402 *nat = 1;
403 }
404 }
405 return 0;
406}
407EXPORT_SYMBOL(unw_access_gr);
408
409int
410unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
411{
412 unsigned long *addr;
413 struct pt_regs *pt;
414
415 switch (regnum) {
416 /* scratch: */
417 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
418 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
419 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
420
421 /* preserved: */
422 case 1: case 2: case 3: case 4: case 5:
423 addr = *(&info->b1_loc + (regnum - 1));
424 if (!addr)
425 addr = &info->sw->b1 + (regnum - 1);
426 break;
427
428 default:
429 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
430 __FUNCTION__, regnum);
431 return -1;
432 }
433 if (write)
434 if (read_only(addr)) {
435 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
436 __FUNCTION__);
437 } else
438 *addr = *val;
439 else
440 *val = *addr;
441 return 0;
442}
443EXPORT_SYMBOL(unw_access_br);
444
445int
446unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
447{
448 struct ia64_fpreg *addr = NULL;
449 struct pt_regs *pt;
450
451 if ((unsigned) (regnum - 2) >= 126) {
452 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
453 __FUNCTION__, regnum);
454 return -1;
455 }
456
457 if (regnum <= 5) {
458 addr = *(&info->f2_loc + (regnum - 2));
459 if (!addr)
460 addr = &info->sw->f2 + (regnum - 2);
461 } else if (regnum <= 15) {
462 if (regnum <= 11) {
463 pt = get_scratch_regs(info);
464 addr = &pt->f6 + (regnum - 6);
465 }
466 else
467 addr = &info->sw->f12 + (regnum - 12);
468 } else if (regnum <= 31) {
469 addr = info->fr_loc[regnum - 16];
470 if (!addr)
471 addr = &info->sw->f16 + (regnum - 16);
472 } else {
473 struct task_struct *t = info->task;
474
475 if (write)
476 ia64_sync_fph(t);
477 else
478 ia64_flush_fph(t);
479 addr = t->thread.fph + (regnum - 32);
480 }
481
482 if (write)
483 if (read_only(addr)) {
484 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
485 __FUNCTION__);
486 } else
487 *addr = *val;
488 else
489 *val = *addr;
490 return 0;
491}
492EXPORT_SYMBOL(unw_access_fr);
493
494int
495unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
496{
497 unsigned long *addr;
498 struct pt_regs *pt;
499
500 switch (regnum) {
501 case UNW_AR_BSP:
502 addr = info->bsp_loc;
503 if (!addr)
504 addr = &info->sw->ar_bspstore;
505 break;
506
507 case UNW_AR_BSPSTORE:
508 addr = info->bspstore_loc;
509 if (!addr)
510 addr = &info->sw->ar_bspstore;
511 break;
512
513 case UNW_AR_PFS:
514 addr = info->pfs_loc;
515 if (!addr)
516 addr = &info->sw->ar_pfs;
517 break;
518
519 case UNW_AR_RNAT:
520 addr = info->rnat_loc;
521 if (!addr)
522 addr = &info->sw->ar_rnat;
523 break;
524
525 case UNW_AR_UNAT:
526 addr = info->unat_loc;
527 if (!addr)
 528 addr = &info->sw->caller_unat;
529 break;
530
531 case UNW_AR_LC:
532 addr = info->lc_loc;
533 if (!addr)
534 addr = &info->sw->ar_lc;
535 break;
536
537 case UNW_AR_EC:
538 if (!info->cfm_loc)
539 return -1;
540 if (write)
541 *info->cfm_loc =
542 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
543 else
544 *val = (*info->cfm_loc >> 52) & 0x3f;
545 return 0;
546
547 case UNW_AR_FPSR:
548 addr = info->fpsr_loc;
549 if (!addr)
550 addr = &info->sw->ar_fpsr;
551 break;
552
553 case UNW_AR_RSC:
554 pt = get_scratch_regs(info);
555 addr = &pt->ar_rsc;
556 break;
557
558 case UNW_AR_CCV:
559 pt = get_scratch_regs(info);
560 addr = &pt->ar_ccv;
561 break;
562
563 case UNW_AR_CSD:
564 pt = get_scratch_regs(info);
565 addr = &pt->ar_csd;
566 break;
567
568 case UNW_AR_SSD:
569 pt = get_scratch_regs(info);
570 addr = &pt->ar_ssd;
571 break;
572
573 default:
574 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
575 __FUNCTION__, regnum);
576 return -1;
577 }
578
579 if (write) {
580 if (read_only(addr)) {
581 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
582 __FUNCTION__);
583 } else
584 *addr = *val;
585 } else
586 *val = *addr;
587 return 0;
588}
589EXPORT_SYMBOL(unw_access_ar);
590
591int
592unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
593{
594 unsigned long *addr;
595
596 addr = info->pr_loc;
597 if (!addr)
598 addr = &info->sw->pr;
599
600 if (write) {
601 if (read_only(addr)) {
602 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
603 __FUNCTION__);
604 } else
605 *addr = *val;
606 } else
607 *val = *addr;
608 return 0;
609}
610EXPORT_SYMBOL(unw_access_pr);
611
612\f
613/* Routines to manipulate the state stack. */
614
615static inline void
616push (struct unw_state_record *sr)
617{
618 struct unw_reg_state *rs;
619
620 rs = alloc_reg_state();
621 if (!rs) {
622 printk(KERN_ERR "unwind: cannot stack reg state!\n");
623 return;
624 }
625 memcpy(rs, &sr->curr, sizeof(*rs));
626 sr->curr.next = rs;
627}
628
629static void
630pop (struct unw_state_record *sr)
631{
632 struct unw_reg_state *rs = sr->curr.next;
633
634 if (!rs) {
635 printk(KERN_ERR "unwind: stack underflow!\n");
636 return;
637 }
638 memcpy(&sr->curr, rs, sizeof(*rs));
639 free_reg_state(rs);
640}
641
642/* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
643static struct unw_reg_state *
644dup_state_stack (struct unw_reg_state *rs)
645{
646 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
647
648 while (rs) {
649 copy = alloc_reg_state();
650 if (!copy) {
651 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
652 return NULL;
653 }
654 memcpy(copy, rs, sizeof(*copy));
655 if (first)
656 prev->next = copy;
657 else
658 first = copy;
659 rs = rs->next;
660 prev = copy;
661 }
662 return first;
663}
664
665/* Free all stacked register states (but not RS itself). */
666static void
667free_state_stack (struct unw_reg_state *rs)
668{
669 struct unw_reg_state *p, *next;
670
671 for (p = rs->next; p != NULL; p = next) {
672 next = p->next;
673 free_reg_state(p);
674 }
675 rs->next = NULL;
676}
677\f
678/* Unwind decoder routines */
679
680static enum unw_register_index __attribute_const__
681decode_abreg (unsigned char abreg, int memory)
682{
683 switch (abreg) {
684 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
685 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
686 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
687 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
688 case 0x60: return UNW_REG_PR;
689 case 0x61: return UNW_REG_PSP;
690 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
691 case 0x63: return UNW_REG_RP;
692 case 0x64: return UNW_REG_BSP;
693 case 0x65: return UNW_REG_BSPSTORE;
694 case 0x66: return UNW_REG_RNAT;
695 case 0x67: return UNW_REG_UNAT;
696 case 0x68: return UNW_REG_FPSR;
697 case 0x69: return UNW_REG_PFS;
698 case 0x6a: return UNW_REG_LC;
699 default:
700 break;
701 }
702 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
703 return UNW_REG_LC;
704}
705
706static void
707set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
708{
709 reg->val = val;
710 reg->where = where;
711 if (reg->when == UNW_WHEN_NEVER)
712 reg->when = when;
713}
714
715static void
716alloc_spill_area (unsigned long *offp, unsigned long regsize,
717 struct unw_reg_info *lo, struct unw_reg_info *hi)
718{
719 struct unw_reg_info *reg;
720
721 for (reg = hi; reg >= lo; --reg) {
722 if (reg->where == UNW_WHERE_SPILL_HOME) {
723 reg->where = UNW_WHERE_PSPREL;
724 *offp -= regsize;
725 reg->val = *offp;
726 }
727 }
728}
729
730static inline void
731spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
732{
733 struct unw_reg_info *reg;
734
735 for (reg = *regp; reg <= lim; ++reg) {
736 if (reg->where == UNW_WHERE_SPILL_HOME) {
737 reg->when = t;
738 *regp = reg + 1;
739 return;
740 }
741 }
742 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
743}
744
745static inline void
746finish_prologue (struct unw_state_record *sr)
747{
748 struct unw_reg_info *reg;
749 unsigned long off;
750 int i;
751
752 /*
753 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
754 * for Using Unwind Descriptors", rule 3):
755 */
756 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
757 reg = sr->curr.reg + unw.save_order[i];
758 if (reg->where == UNW_WHERE_GR_SAVE) {
759 reg->where = UNW_WHERE_GR;
760 reg->val = sr->gr_save_loc++;
761 }
762 }
763
764 /*
765 * Next, compute when the fp, general, and branch registers get
766 * saved. This must come before alloc_spill_area() because
767 * we need to know which registers are spilled to their home
768 * locations.
769 */
770 if (sr->imask) {
771 unsigned char kind, mask = 0, *cp = sr->imask;
772 int t;
773 static const unsigned char limit[3] = {
774 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
775 };
776 struct unw_reg_info *(regs[3]);
777
778 regs[0] = sr->curr.reg + UNW_REG_F2;
779 regs[1] = sr->curr.reg + UNW_REG_R4;
780 regs[2] = sr->curr.reg + UNW_REG_B1;
781
782 for (t = 0; t < sr->region_len; ++t) {
783 if ((t & 3) == 0)
784 mask = *cp++;
785 kind = (mask >> 2*(3-(t & 3))) & 3;
786 if (kind > 0)
787 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
788 sr->region_start + t);
789 }
790 }
791 /*
792 * Next, lay out the memory stack spill area:
793 */
794 if (sr->any_spills) {
795 off = sr->spill_offset;
796 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
797 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
798 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
799 }
800}
801
802/*
803 * Region header descriptors.
804 */
805
806static void
807desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
808 struct unw_state_record *sr)
809{
810 int i, region_start;
811
812 if (!(sr->in_body || sr->first_region))
813 finish_prologue(sr);
814 sr->first_region = 0;
815
816 /* check if we're done: */
817 if (sr->when_target < sr->region_start + sr->region_len) {
818 sr->done = 1;
819 return;
820 }
821
822 region_start = sr->region_start + sr->region_len;
823
824 for (i = 0; i < sr->epilogue_count; ++i)
825 pop(sr);
826 sr->epilogue_count = 0;
827 sr->epilogue_start = UNW_WHEN_NEVER;
828
829 sr->region_start = region_start;
830 sr->region_len = rlen;
831 sr->in_body = body;
832
833 if (!body) {
834 push(sr);
835
836 for (i = 0; i < 4; ++i) {
837 if (mask & 0x8)
838 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
839 sr->region_start + sr->region_len - 1, grsave++);
840 mask <<= 1;
841 }
842 sr->gr_save_loc = grsave;
843 sr->any_spills = 0;
844 sr->imask = NULL;
845 sr->spill_offset = 0x10; /* default to psp+16 */
846 }
847}
848
849/*
850 * Prologue descriptors.
851 */
852
853static inline void
854desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
855{
856 if (abi == 3 && context == 'i') {
857 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
858 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
859 }
860 else
861 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
862 __FUNCTION__, abi, context);
863}
864
865static inline void
866desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
867{
868 int i;
869
870 for (i = 0; i < 5; ++i) {
871 if (brmask & 1)
872 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
873 sr->region_start + sr->region_len - 1, gr++);
874 brmask >>= 1;
875 }
876}
877
878static inline void
879desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
880{
881 int i;
882
883 for (i = 0; i < 5; ++i) {
884 if (brmask & 1) {
885 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
886 sr->region_start + sr->region_len - 1, 0);
887 sr->any_spills = 1;
888 }
889 brmask >>= 1;
890 }
891}
892
893static inline void
894desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
895{
896 int i;
897
898 for (i = 0; i < 4; ++i) {
899 if ((grmask & 1) != 0) {
900 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
901 sr->region_start + sr->region_len - 1, 0);
902 sr->any_spills = 1;
903 }
904 grmask >>= 1;
905 }
906 for (i = 0; i < 20; ++i) {
907 if ((frmask & 1) != 0) {
908 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
909 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
910 sr->region_start + sr->region_len - 1, 0);
911 sr->any_spills = 1;
912 }
913 frmask >>= 1;
914 }
915}
916
917static inline void
918desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
919{
920 int i;
921
922 for (i = 0; i < 4; ++i) {
923 if ((frmask & 1) != 0) {
924 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
925 sr->region_start + sr->region_len - 1, 0);
926 sr->any_spills = 1;
927 }
928 frmask >>= 1;
929 }
930}
931
932static inline void
933desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
934{
935 int i;
936
937 for (i = 0; i < 4; ++i) {
938 if ((grmask & 1) != 0)
939 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
940 sr->region_start + sr->region_len - 1, gr++);
941 grmask >>= 1;
942 }
943}
944
945static inline void
946desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
947{
948 int i;
949
950 for (i = 0; i < 4; ++i) {
951 if ((grmask & 1) != 0) {
952 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
953 sr->region_start + sr->region_len - 1, 0);
954 sr->any_spills = 1;
955 }
956 grmask >>= 1;
957 }
958}
959
960static inline void
961desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
962{
963 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
964 sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
965}
966
967static inline void
968desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
969{
970 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
971}
972
973static inline void
974desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
975{
976 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
977}
978
979static inline void
980desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
981{
982 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
983 0x10 - 4*pspoff);
984}
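/*
 * Offset-encoding note (a sketch, as I read the descriptors): psp-relative
 * locations are encoded as positive 4-byte-word offsets measured downward from
 * psp+16, so a descriptor value of pspoff becomes the signed byte offset
 * 0x10 - 4*pspoff from psp; UNW_INSN_ADD_PSP later adds exactly this value to
 * psp when the script runs.
 */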
985
986static inline void
987desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
988{
989 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
990 4*spoff);
991}
992
993static inline void
994desc_rp_br (unsigned char dst, struct unw_state_record *sr)
995{
996 sr->return_link_reg = dst;
997}
998
999static inline void
1000desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1001{
1002 struct unw_reg_info *reg = sr->curr.reg + regnum;
1003
1004 if (reg->where == UNW_WHERE_NONE)
1005 reg->where = UNW_WHERE_GR_SAVE;
1006 reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1007}
1008
1009static inline void
1010desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1011{
1012 sr->spill_offset = 0x10 - 4*pspoff;
1013}
1014
1015static inline unsigned char *
1016desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1017{
1018 sr->imask = imaskp;
1019 return imaskp + (2*sr->region_len + 7)/8;
1020}
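/*
 * Note on the size computation above: the imask holds two bits per instruction
 * slot in the region (0 = no spill, 1 = fr, 2 = gr, 3 = br, consumed in
 * finish_prologue()), so the mask occupies (2*region_len + 7)/8 bytes and the
 * returned pointer skips past it to the next descriptor.
 */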
1021
1022/*
1023 * Body descriptors.
1024 */
1025static inline void
1026desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1027{
1028 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1029 sr->epilogue_count = ecount + 1;
1030}
1031
1032static inline void
1033desc_copy_state (unw_word label, struct unw_state_record *sr)
1034{
1035 struct unw_labeled_state *ls;
1036
1037 for (ls = sr->labeled_states; ls; ls = ls->next) {
1038 if (ls->label == label) {
1039 free_state_stack(&sr->curr);
1040 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1041 sr->curr.next = dup_state_stack(ls->saved_state.next);
1042 return;
1043 }
1044 }
1045 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1046}
1047
1048static inline void
1049desc_label_state (unw_word label, struct unw_state_record *sr)
1050{
1051 struct unw_labeled_state *ls;
1052
1053 ls = alloc_labeled_state();
1054 if (!ls) {
1055 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1056 return;
1057 }
1058 ls->label = label;
1059 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1060 ls->saved_state.next = dup_state_stack(sr->curr.next);
1061
1062 /* insert into list of labeled states: */
1063 ls->next = sr->labeled_states;
1064 sr->labeled_states = ls;
1065}
1066
1067/*
1068 * General descriptors.
1069 */
1070
1071static inline int
1072desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1073{
1074 if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1075 return 0;
1076 if (qp > 0) {
1077 if ((sr->pr_val & (1UL << qp)) == 0)
1078 return 0;
1079 sr->pr_mask |= (1UL << qp);
1080 }
1081 return 1;
1082}
1083
1084static inline void
1085desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1086{
1087 struct unw_reg_info *r;
1088
1089 if (!desc_is_active(qp, t, sr))
1090 return;
1091
1092 r = sr->curr.reg + decode_abreg(abreg, 0);
1093 r->where = UNW_WHERE_NONE;
1094 r->when = UNW_WHEN_NEVER;
1095 r->val = 0;
1096}
1097
1098static inline void
1099desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1100 unsigned char ytreg, struct unw_state_record *sr)
1101{
1102 enum unw_where where = UNW_WHERE_GR;
1103 struct unw_reg_info *r;
1104
1105 if (!desc_is_active(qp, t, sr))
1106 return;
1107
1108 if (x)
1109 where = UNW_WHERE_BR;
1110 else if (ytreg & 0x80)
1111 where = UNW_WHERE_FR;
1112
1113 r = sr->curr.reg + decode_abreg(abreg, 0);
1114 r->where = where;
1115 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1116 r->val = (ytreg & 0x7f);
1117}
1118
1119static inline void
1120desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1121 struct unw_state_record *sr)
1122{
1123 struct unw_reg_info *r;
1124
1125 if (!desc_is_active(qp, t, sr))
1126 return;
1127
1128 r = sr->curr.reg + decode_abreg(abreg, 1);
1129 r->where = UNW_WHERE_PSPREL;
1130 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1131 r->val = 0x10 - 4*pspoff;
1132}
1133
1134static inline void
1135desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1136 struct unw_state_record *sr)
1137{
1138 struct unw_reg_info *r;
1139
1140 if (!desc_is_active(qp, t, sr))
1141 return;
1142
1143 r = sr->curr.reg + decode_abreg(abreg, 1);
1144 r->where = UNW_WHERE_SPREL;
1145 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1146 r->val = 4*spoff;
1147}
1148
1149#define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1150 code);
1151
1152/*
1153 * region headers:
1154 */
1155#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1156#define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1157/*
1158 * prologue descriptors:
1159 */
1160#define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1161#define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1162#define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1163#define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1164#define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1165#define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1166#define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1167#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1168#define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1169#define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1170#define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1171#define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1172#define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1173#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1174#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1175#define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1176#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1178#define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1179#define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1180#define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1181/*
1182 * body descriptors:
1183 */
1184#define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1185#define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1186#define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1187/*
1188 * general unwind descriptors:
1189 */
1190#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1191#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1192#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1193#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1194#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1195#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1196#define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1197#define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1198
1199#include "unwind_decoder.c"
1200
1201\f
1202/* Unwind scripts. */
1203
1204static inline unw_hash_index_t
1205hash (unsigned long ip)
1206{
 1207# define hashmagic 0x9e3779b97f4a7c16UL /* based on ((sqrt(5)-1)/2)*2^64 */
1208
1209 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1210#undef hashmagic
1211}
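/*
 * A sketch of what the hash does: this is multiplicative (Fibonacci) hashing.
 * ip >> 4 drops the slot number within the 16-byte bundle, the multiply by
 * hashmagic (~= 2^64 * (sqrt(5)-1)/2) scrambles the bits, and keeping the top
 * UNW_LOG_HASH_SIZE bits yields an index in [0, UNW_HASH_SIZE).
 */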
1212
1213static inline long
1214cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1215{
1216 read_lock(&script->lock);
1217 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1218 /* keep the read lock... */
1219 return 1;
1220 read_unlock(&script->lock);
1221 return 0;
1222}
1223
1224static inline struct unw_script *
1225script_lookup (struct unw_frame_info *info)
1226{
1227 struct unw_script *script = unw.cache + info->hint;
1228 unsigned short index;
1229 unsigned long ip, pr;
1230
1231 if (UNW_DEBUG_ON(0))
1232 return NULL; /* Always regenerate scripts in debug mode */
1233
1234 STAT(++unw.stat.cache.lookups);
1235
1236 ip = info->ip;
1237 pr = info->pr;
1238
1239 if (cache_match(script, ip, pr)) {
1240 STAT(++unw.stat.cache.hinted_hits);
1241 return script;
1242 }
1243
1244 index = unw.hash[hash(ip)];
1245 if (index >= UNW_CACHE_SIZE)
1246 return NULL;
1247
1248 script = unw.cache + index;
1249 while (1) {
1250 if (cache_match(script, ip, pr)) {
1251 /* update hint; no locking required as single-word writes are atomic */
1252 STAT(++unw.stat.cache.normal_hits);
1253 unw.cache[info->prev_script].hint = script - unw.cache;
1254 return script;
1255 }
1256 if (script->coll_chain >= UNW_HASH_SIZE)
1257 return NULL;
1258 script = unw.cache + script->coll_chain;
1259 STAT(++unw.stat.cache.collision_chain_traversals);
1260 }
1261}
1262
1263/*
1264 * On returning, a write lock for the SCRIPT is still being held.
1265 */
1266static inline struct unw_script *
1267script_new (unsigned long ip)
1268{
1269 struct unw_script *script, *prev, *tmp;
1270 unw_hash_index_t index;
1271 unsigned short head;
1272
1273 STAT(++unw.stat.script.news);
1274
1275 /*
1276 * Can't (easily) use cmpxchg() here because of ABA problem
1277 * that is intrinsic in cmpxchg()...
1278 */
1279 head = unw.lru_head;
1280 script = unw.cache + head;
1281 unw.lru_head = script->lru_chain;
1282
1283 /*
1284 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1285 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1286 * alternative would be to disable interrupts whenever we hold a read-lock, but
1287 * that seems silly.
1288 */
1289 if (!write_trylock(&script->lock))
1290 return NULL;
1291
1292 /* re-insert script at the tail of the LRU chain: */
1293 unw.cache[unw.lru_tail].lru_chain = head;
1294 unw.lru_tail = head;
1295
1296 /* remove the old script from the hash table (if it's there): */
1297 if (script->ip) {
1298 index = hash(script->ip);
1299 tmp = unw.cache + unw.hash[index];
1300 prev = NULL;
1301 while (1) {
1302 if (tmp == script) {
1303 if (prev)
1304 prev->coll_chain = tmp->coll_chain;
1305 else
1306 unw.hash[index] = tmp->coll_chain;
1307 break;
1308 } else
1309 prev = tmp;
1310 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1311 /* old script wasn't in the hash-table */
1312 break;
1313 tmp = unw.cache + tmp->coll_chain;
1314 }
1315 }
1316
1317 /* enter new script in the hash table */
1318 index = hash(ip);
1319 script->coll_chain = unw.hash[index];
1320 unw.hash[index] = script - unw.cache;
1321
1322 script->ip = ip; /* set new IP while we're holding the locks */
1323
1324 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1325
1326 script->flags = 0;
1327 script->hint = 0;
1328 script->count = 0;
1329 return script;
1330}
1331
1332static void
1333script_finalize (struct unw_script *script, struct unw_state_record *sr)
1334{
1335 script->pr_mask = sr->pr_mask;
1336 script->pr_val = sr->pr_val;
1337 /*
1338 * We could down-grade our write-lock on script->lock here but
1339 * the rwlock API doesn't offer atomic lock downgrading, so
1340 * we'll just keep the write-lock and release it later when
1341 * we're done using the script.
1342 */
1343}
1344
1345static inline void
1346script_emit (struct unw_script *script, struct unw_insn insn)
1347{
1348 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1349 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1350 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1351 return;
1352 }
1353 script->insn[script->count++] = insn;
1354}
1355
1356static inline void
1357emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1358{
1359 struct unw_reg_info *r = sr->curr.reg + i;
1360 enum unw_insn_opcode opc;
1361 struct unw_insn insn;
1362 unsigned long val = 0;
1363
1364 switch (r->where) {
1365 case UNW_WHERE_GR:
1366 if (r->val >= 32) {
1367 /* register got spilled to a stacked register */
1368 opc = UNW_INSN_SETNAT_TYPE;
1369 val = UNW_NAT_REGSTK;
1370 } else
1371 /* register got spilled to a scratch register */
1372 opc = UNW_INSN_SETNAT_MEMSTK;
1373 break;
1374
1375 case UNW_WHERE_FR:
1376 opc = UNW_INSN_SETNAT_TYPE;
1377 val = UNW_NAT_VAL;
1378 break;
1379
1380 case UNW_WHERE_BR:
1381 opc = UNW_INSN_SETNAT_TYPE;
1382 val = UNW_NAT_NONE;
1383 break;
1384
1385 case UNW_WHERE_PSPREL:
1386 case UNW_WHERE_SPREL:
1387 opc = UNW_INSN_SETNAT_MEMSTK;
1388 break;
1389
1390 default:
1391 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1392 __FUNCTION__, r->where);
1393 return;
1394 }
1395 insn.opc = opc;
1396 insn.dst = unw.preg_index[i];
1397 insn.val = val;
1398 script_emit(script, insn);
1399}
1400
1401static void
1402compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1403{
1404 struct unw_reg_info *r = sr->curr.reg + i;
1405 enum unw_insn_opcode opc;
1406 unsigned long val, rval;
1407 struct unw_insn insn;
1408 long need_nat_info;
1409
1410 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1411 return;
1412
1413 opc = UNW_INSN_MOVE;
1414 val = rval = r->val;
1415 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1416
1417 switch (r->where) {
1418 case UNW_WHERE_GR:
1419 if (rval >= 32) {
1420 opc = UNW_INSN_MOVE_STACKED;
1421 val = rval - 32;
1422 } else if (rval >= 4 && rval <= 7) {
1423 if (need_nat_info) {
1424 opc = UNW_INSN_MOVE2;
1425 need_nat_info = 0;
1426 }
1427 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1428 } else if (rval == 0) {
1429 opc = UNW_INSN_MOVE_CONST;
1430 val = 0;
1431 } else {
1432 /* register got spilled to a scratch register */
1433 opc = UNW_INSN_MOVE_SCRATCH;
1434 val = pt_regs_off(rval);
1435 }
1436 break;
1437
1438 case UNW_WHERE_FR:
1439 if (rval <= 5)
1440 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1441 else if (rval >= 16 && rval <= 31)
1442 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1443 else {
1444 opc = UNW_INSN_MOVE_SCRATCH;
1445 if (rval <= 11)
1446 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1447 else
1448 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1449 __FUNCTION__, rval);
1450 }
1451 break;
1452
1453 case UNW_WHERE_BR:
1454 if (rval >= 1 && rval <= 5)
1455 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1456 else {
1457 opc = UNW_INSN_MOVE_SCRATCH;
1458 if (rval == 0)
1459 val = offsetof(struct pt_regs, b0);
1460 else if (rval == 6)
1461 val = offsetof(struct pt_regs, b6);
1462 else
1463 val = offsetof(struct pt_regs, b7);
1464 }
1465 break;
1466
1467 case UNW_WHERE_SPREL:
1468 opc = UNW_INSN_ADD_SP;
1469 break;
1470
1471 case UNW_WHERE_PSPREL:
1472 opc = UNW_INSN_ADD_PSP;
1473 break;
1474
1475 default:
1476 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1477 __FUNCTION__, i, r->where);
1478 break;
1479 }
1480 insn.opc = opc;
1481 insn.dst = unw.preg_index[i];
1482 insn.val = val;
1483 script_emit(script, insn);
1484 if (need_nat_info)
1485 emit_nat_info(sr, i, script);
1486
1487 if (i == UNW_REG_PSP) {
1488 /*
1489 * info->psp must contain the _value_ of the previous
 1490 * sp, not its save location. We get this by
1491 * dereferencing the value we just stored in
1492 * info->psp:
1493 */
1494 insn.opc = UNW_INSN_LOAD;
1495 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1496 script_emit(script, insn);
1497 }
1498}
1499
1500static inline const struct unw_table_entry *
1501lookup (struct unw_table *table, unsigned long rel_ip)
1502{
1503 const struct unw_table_entry *e = NULL;
1504 unsigned long lo, hi, mid;
1505
1506 /* do a binary search for right entry: */
1507 for (lo = 0, hi = table->length; lo < hi; ) {
1508 mid = (lo + hi) / 2;
1509 e = &table->array[mid];
1510 if (rel_ip < e->start_offset)
1511 hi = mid;
1512 else if (rel_ip >= e->end_offset)
1513 lo = mid + 1;
1514 else
1515 break;
1516 }
1517 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1518 return NULL;
1519 return e;
1520}
1521
1522/*
1523 * Build an unwind script that unwinds from state OLD_STATE to the
1524 * entrypoint of the function that called OLD_STATE.
1525 */
1526static inline struct unw_script *
1527build_script (struct unw_frame_info *info)
1528{
1529 const struct unw_table_entry *e = NULL;
1530 struct unw_script *script = NULL;
1531 struct unw_labeled_state *ls, *next;
1532 unsigned long ip = info->ip;
1533 struct unw_state_record sr;
1534 struct unw_table *table;
1535 struct unw_reg_info *r;
1536 struct unw_insn insn;
1537 u8 *dp, *desc_end;
1538 u64 hdr;
1539 int i;
1540 STAT(unsigned long start, parse_start;)
1541
1542 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1543
1544 /* build state record */
1545 memset(&sr, 0, sizeof(sr));
1546 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1547 r->when = UNW_WHEN_NEVER;
1548 sr.pr_val = info->pr;
1549
1550 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1551 script = script_new(ip);
1552 if (!script) {
1553 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1554 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1555 return NULL;
1556 }
1557 unw.cache[info->prev_script].hint = script - unw.cache;
1558
 1559 /* search the kernel's and the modules' unwind tables for IP: */
1560
1561 STAT(parse_start = ia64_get_itc());
1562
1563 for (table = unw.tables; table; table = table->next) {
1564 if (ip >= table->start && ip < table->end) {
1565 e = lookup(table, ip - table->segment_base);
1566 break;
1567 }
1568 }
1569 if (!e) {
1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1571 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1572 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1573 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1574 sr.curr.reg[UNW_REG_RP].when = -1;
1575 sr.curr.reg[UNW_REG_RP].val = 0;
1576 compile_reg(&sr, UNW_REG_RP, script);
1577 script_finalize(script, &sr);
1578 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1579 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1580 return script;
1581 }
1582
1583 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1584 + (ip & 0xfUL));
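	/*
	 * A note on the expression above: IA-64 bundles are 16 bytes wide and
	 * hold three instruction slots, so the byte distance from the start
	 * of the function is converted into a slot count (3 slots per bundle)
	 * and the slot number carried in the low 4 bits of ip is added.
	 */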
1585 hdr = *(u64 *) (table->segment_base + e->info_offset);
1586 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1587 desc_end = dp + 8*UNW_LENGTH(hdr);
1588
1589 while (!sr.done && dp < desc_end)
1590 dp = unw_decode(dp, sr.in_body, &sr);
1591
1592 if (sr.when_target > sr.epilogue_start) {
1593 /*
1594 * sp has been restored and all values on the memory stack below
1595 * psp also have been restored.
1596 */
1597 sr.curr.reg[UNW_REG_PSP].val = 0;
1598 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1599 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1600 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1601 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1602 || r->where == UNW_WHERE_SPREL)
1603 {
1604 r->val = 0;
1605 r->where = UNW_WHERE_NONE;
1606 r->when = UNW_WHEN_NEVER;
1607 }
1608 }
1609
1610 script->flags = sr.flags;
1611
1612 /*
 1613 * If RP didn't get saved, generate entry for the return link
1614 * register.
1615 */
1616 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1617 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1618 sr.curr.reg[UNW_REG_RP].when = -1;
1619 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1620 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1621 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1622 sr.curr.reg[UNW_REG_RP].val);
1623 }
1624
1625#ifdef UNW_DEBUG
1626 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1627 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1629 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1630 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1631 switch (r->where) {
1632 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1633 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1634 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1635 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1636 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1637 case UNW_WHERE_NONE:
1638 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1639 break;
1640
1641 default:
1642 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1643 break;
1644 }
1645 UNW_DPRINT(1, "\t\t%d\n", r->when);
1646 }
1647 }
1648#endif
1649
1650 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1651
1652 /* translate state record into unwinder instructions: */
1653
1654 /*
1655 * First, set psp if we're dealing with a fixed-size frame;
1656 * subsequent instructions may depend on this value.
1657 */
1658 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1659 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1660 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1661 /* new psp is sp plus frame size */
1662 insn.opc = UNW_INSN_ADD;
1663 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1664 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1665 script_emit(script, insn);
1666 }
1667
1668 /* determine where the primary UNaT is: */
1669 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1670 i = UNW_REG_PRI_UNAT_MEM;
1671 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1672 i = UNW_REG_PRI_UNAT_GR;
1673 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1674 i = UNW_REG_PRI_UNAT_MEM;
1675 else
1676 i = UNW_REG_PRI_UNAT_GR;
1677
1678 compile_reg(&sr, i, script);
1679
1680 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1681 compile_reg(&sr, i, script);
1682
1683 /* free labeled register states & stack: */
1684
1685 STAT(parse_start = ia64_get_itc());
1686 for (ls = sr.labeled_states; ls; ls = next) {
1687 next = ls->next;
1688 free_state_stack(&ls->saved_state);
1689 free_labeled_state(ls);
1690 }
1691 free_state_stack(&sr.curr);
1692 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1693
1694 script_finalize(script, &sr);
1695 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1696 return script;
1697}
1698
1699/*
1700 * Apply the unwinding actions represented by OPS and update SR to
1701 * reflect the state that existed upon entry to the function that this
1702 * unwinder represents.
1703 */
1704static inline void
1705run_script (struct unw_script *script, struct unw_frame_info *state)
1706{
1707 struct unw_insn *ip, *limit, next_insn;
1708 unsigned long opc, dst, val, off;
1709 unsigned long *s = (unsigned long *) state;
1710 STAT(unsigned long start;)
1711
1712 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1713 state->flags = script->flags;
1714 ip = script->insn;
1715 limit = script->insn + script->count;
1716 next_insn = *ip;
1717
1718 while (ip++ < limit) {
1719 opc = next_insn.opc;
1720 dst = next_insn.dst;
1721 val = next_insn.val;
1722 next_insn = *ip;
1723
1724 redo:
1725 switch (opc) {
1726 case UNW_INSN_ADD:
1727 s[dst] += val;
1728 break;
1729
1730 case UNW_INSN_MOVE2:
1731 if (!s[val])
1732 goto lazy_init;
1733 s[dst+1] = s[val+1];
1734 s[dst] = s[val];
1735 break;
1736
1737 case UNW_INSN_MOVE:
1738 if (!s[val])
1739 goto lazy_init;
1740 s[dst] = s[val];
1741 break;
1742
1743 case UNW_INSN_MOVE_SCRATCH:
1744 if (state->pt) {
1745 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1746 } else {
1747 s[dst] = 0;
1748 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1749 __FUNCTION__, dst, val);
1750 }
1751 break;
1752
1753 case UNW_INSN_MOVE_CONST:
1754 if (val == 0)
1755 s[dst] = (unsigned long) &unw.r0;
1756 else {
1757 s[dst] = 0;
1758 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1759 __FUNCTION__, val);
1760 }
1761 break;
1762
1763
1764 case UNW_INSN_MOVE_STACKED:
1765 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1766 val);
1767 break;
1768
1769 case UNW_INSN_ADD_PSP:
1770 s[dst] = state->psp + val;
1771 break;
1772
1773 case UNW_INSN_ADD_SP:
1774 s[dst] = state->sp + val;
1775 break;
1776
1777 case UNW_INSN_SETNAT_MEMSTK:
1778 if (!state->pri_unat_loc)
 1779 state->pri_unat_loc = &state->sw->caller_unat;
1780 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1781 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1782 break;
1783
1784 case UNW_INSN_SETNAT_TYPE:
1785 s[dst+1] = val;
1786 break;
1787
1788 case UNW_INSN_LOAD:
1789#ifdef UNW_DEBUG
1790 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1791 || s[val] < TASK_SIZE)
1792 {
1793 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1794 __FUNCTION__, s[val]);
1795 break;
1796 }
1797#endif
1798 s[dst] = *(unsigned long *) s[val];
1799 break;
1800 }
1801 }
1802 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1803 return;
1804
1805 lazy_init:
1806 off = unw.sw_off[val];
1807 s[val] = (unsigned long) state->sw + off;
1808 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1809 /*
1810 * We're initializing a general register: init NaT info, too. Note that
1811 * the offset is a multiple of 8 which gives us the 3 bits needed for
1812 * the type field.
1813 */
1814 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1815 goto redo;
1816}
1817
1818static int
1819find_save_locs (struct unw_frame_info *info)
1820{
1821 int have_write_lock = 0;
1822 struct unw_script *scr;
1823 unsigned long flags = 0;
1824
1825 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1826 /* don't let obviously bad addresses pollute the cache */
1827 /* FIXME: should really be level 0 but it occurs too often. KAO */
1828 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1829 info->rp_loc = NULL;
1830 return -1;
1831 }
1832
1833 scr = script_lookup(info);
1834 if (!scr) {
1835 spin_lock_irqsave(&unw.lock, flags);
1836 scr = build_script(info);
1837 if (!scr) {
1838 spin_unlock_irqrestore(&unw.lock, flags);
1839 UNW_DPRINT(0,
1840 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1841 __FUNCTION__, info->ip);
1842 return -1;
1843 }
1844 have_write_lock = 1;
1845 }
1846 info->hint = scr->hint;
1847 info->prev_script = scr - unw.cache;
1848
1849 run_script(scr, info);
1850
1851 if (have_write_lock) {
1852 write_unlock(&scr->lock);
1853 spin_unlock_irqrestore(&unw.lock, flags);
1854 } else
1855 read_unlock(&scr->lock);
1856 return 0;
1857}
1858
1859int
1860unw_unwind (struct unw_frame_info *info)
1861{
1862 unsigned long prev_ip, prev_sp, prev_bsp;
 1863 unsigned long ip, pr, num_regs, rp_loc, pfs_loc;
1864 STAT(unsigned long start, flags;)
1865 int retval;
1866
1867 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1868
1869 prev_ip = info->ip;
1870 prev_sp = info->sp;
1871 prev_bsp = info->bsp;
1872
1873 /* validate the return IP pointer */
1874 rp_loc = (unsigned long) info->rp_loc;
1875 if ((rp_loc < info->regstk.limit) || (rp_loc > info->regstk.top)) {
1876 /* FIXME: should really be level 0 but it occurs too often. KAO */
1877 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1878 __FUNCTION__, info->ip);
1879 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1880 return -1;
1881 }
 1882 /* restore the ip */
1883 ip = info->ip = *info->rp_loc;
1884 if (ip < GATE_ADDR) {
1885 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1886 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1887 return -1;
1888 }
1889
1890 /* validate the previous stack frame pointer */
1891 pfs_loc = (unsigned long) info->pfs_loc;
1892 if ((pfs_loc < info->regstk.limit) || (pfs_loc > info->regstk.top)) {
1893 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1894 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1895 return -1;
1896 }
 1897 /* restore the cfm: */
1898 info->cfm_loc = info->pfs_loc;
1899
1900 /* restore the bsp: */
1901 pr = info->pr;
1902 num_regs = 0;
1903 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1904 info->pt = info->sp + 16;
1905 if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1906 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1907 info->pfs_loc =
1908 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1909 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1910 } else
1911 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1912 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1913 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1914 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1915 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1916 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1917 return -1;
1918 }
1919
1920 /* restore the sp: */
1921 info->sp = info->psp;
1922 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1923 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1924 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1925 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1926 return -1;
1927 }
1928
1929 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1930 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1931 __FUNCTION__, ip);
1932 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1933 return -1;
1934 }
1935
1936 /* as we unwind, the saved ar.unat becomes the primary unat: */
1937 info->pri_unat_loc = info->unat_loc;
1938
1939 /* finally, restore the predicates: */
1940 unw_get_pr(info, &info->pr);
1941
1942 retval = find_save_locs(info);
1943 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1944 return retval;
1945}
1946EXPORT_SYMBOL(unw_unwind);
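/*
 * Example (hypothetical, for illustration only): a caller that has already
 * initialized a struct unw_frame_info through one of the unw_init_*()
 * interfaces typically loops until unw_unwind() returns a negative value;
 * each successful call moves the frame info one level up the call stack.
 * The helper below is not part of this file.
 */
static int
example_count_frames (struct unw_frame_info *info)
{
	int count = 0;

	do {
		++count;
	} while (unw_unwind(info) >= 0);
	return count;
}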
1947
1948int
1949unw_unwind_to_user (struct unw_frame_info *info)
1950{
690def21 1951 unsigned long ip, sp, pr = info->pr;
1da177e4 1952
690def21 1953 do {
1da177e4 1954 unw_get_sp(info, &sp);
1955 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1956 < IA64_PT_REGS_SIZE) {
1957 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1958 __FUNCTION__);
1da177e4 1959 break;
1960 }
1961 if (unw_is_intr_frame(info) &&
1962 (pr & (1UL << PRED_USER_STACK)))
1da177e4 1963 return 0;
1964 if (unw_get_pr (info, &pr) < 0) {
1965 unw_get_rp(info, &ip);
1966 UNW_DPRINT(0, "unwind.%s: failed to read "
1967 "predicate register (ip=0x%lx)\n",
1968 __FUNCTION__, ip);
1969 return -1;
1970 }
690def21 1971 } while (unw_unwind(info) >= 0);
1da177e4 1972 unw_get_ip(info, &ip);
1973 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1974 __FUNCTION__, ip);
1975 return -1;
1976}
1977EXPORT_SYMBOL(unw_unwind_to_user);
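/*
 * Example (hypothetical, for illustration only): callers that need the
 * user-level machine state of a blocked task (ptrace-style inspection, for
 * instance) position the frame info at the frame that entered the kernel
 * and, on success, read the saved state through the unw_get_*() and
 * unw_access_*() accessors.  The helper below is not part of this file.
 */
static int
example_seek_user_frame (struct task_struct *child, struct unw_frame_info *info)
{
	unw_init_from_blocked_task(info, child);
	if (unw_unwind_to_user(info) < 0)
		return -1;
	return 0;	/* "info" now refers to the frame that entered from user level */
}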
1978
1979static void
1980init_frame_info (struct unw_frame_info *info, struct task_struct *t,
1981 struct switch_stack *sw, unsigned long stktop)
1982{
1983 unsigned long rbslimit, rbstop, stklimit;
1984 STAT(unsigned long start, flags;)
1985
1986 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
1987
1988 /*
1989 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
1990 * don't want to do that because it would be slow as each preserved register would
1991 * have to be processed. Instead, what we do here is zero out the frame info and
1992 * start the unwind process at the function that created the switch_stack frame.
1993 * When a preserved value in switch_stack needs to be accessed, run_script() will
1994 * initialize the appropriate pointer on demand.
1995 */
1996 memset(info, 0, sizeof(*info));
1997
1998 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
1999 stklimit = (unsigned long) t + IA64_STK_OFFSET;
2000
1da177e4 2001 rbstop = sw->ar_bspstore;
e2e6fe7b 2002 if (rbstop > stklimit || rbstop < rbslimit)
2003 rbstop = rbslimit;
2004
2005 if (stktop <= rbstop)
2006 stktop = rbstop;
2007 if (stktop > stklimit)
2008 stktop = stklimit;
2009
2010 info->regstk.limit = rbslimit;
2011 info->regstk.top = rbstop;
2012 info->memstk.limit = stklimit;
2013 info->memstk.top = stktop;
2014 info->task = t;
2015 info->sw = sw;
2016 info->sp = info->psp = stktop;
2017 info->pr = sw->pr;
2018 UNW_DPRINT(3, "unwind.%s:\n"
2019 " task 0x%lx\n"
2020 " rbs = [0x%lx-0x%lx)\n"
2021 " stk = [0x%lx-0x%lx)\n"
2022 " pr 0x%lx\n"
2023 " sw 0x%lx\n"
2024 " sp 0x%lx\n",
2025 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2026 info->pr, (unsigned long) info->sw, info->sp);
2027 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2028}
2029
2030void
2031unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2032{
2033 unsigned long sol;
2034
2035 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2036 info->cfm_loc = &sw->ar_pfs;
2037 sol = (*info->cfm_loc >> 7) & 0x7f;
2038 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2039 info->ip = sw->b0;
2040 UNW_DPRINT(3, "unwind.%s:\n"
2041 " bsp 0x%lx\n"
2042 " sol 0x%lx\n"
2043 " ip 0x%lx\n",
2044 __FUNCTION__, info->bsp, sol, info->ip);
2045 find_save_locs(info);
2046}
2047
2048EXPORT_SYMBOL(unw_init_frame_info);
2049
2050void
2051unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2052{
2053 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2054
2055 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2056 unw_init_frame_info(info, t, sw);
2057}
2058EXPORT_SYMBOL(unw_init_from_blocked_task);
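/*
 * Example (hypothetical, for illustration only): printing a blocked task's
 * kernel backtrace combines unw_init_from_blocked_task() with the usual
 * unwind loop.  The helper below is not part of this file.
 */
static void
example_dump_backtrace (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk(KERN_DEBUG " [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}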
2059
2060static void
2061init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2062 unsigned long gp, const void *table_start, const void *table_end)
2063{
2064 const struct unw_table_entry *start = table_start, *end = table_end;
2065
2066 table->name = name;
2067 table->segment_base = segment_base;
2068 table->gp = gp;
2069 table->start = segment_base + start[0].start_offset;
2070 table->end = segment_base + end[-1].end_offset;
2071 table->array = start;
2072 table->length = end - start;
2073}
2074
2075void *
2076unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2077 const void *table_start, const void *table_end)
2078{
2079 const struct unw_table_entry *start = table_start, *end = table_end;
2080 struct unw_table *table;
2081 unsigned long flags;
2082
2083 if (end - start <= 0) {
2084 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2085 __FUNCTION__);
2086 return NULL;
2087 }
2088
2089 table = kmalloc(sizeof(*table), GFP_USER);
2090 if (!table)
2091 return NULL;
2092
2093 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2094
2095 spin_lock_irqsave(&unw.lock, flags);
2096 {
2097 /* keep kernel unwind table at the front (it's searched most commonly): */
2098 table->next = unw.tables->next;
2099 unw.tables->next = table;
2100 }
2101 spin_unlock_irqrestore(&unw.lock, flags);
2102
2103 return table;
2104}
2105
2106void
2107unw_remove_unwind_table (void *handle)
2108{
2109 struct unw_table *table, *prev;
2110 struct unw_script *tmp;
2111 unsigned long flags;
2112 long index;
2113
2114 if (!handle) {
2115 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2116 __FUNCTION__);
2117 return;
2118 }
2119
2120 table = handle;
2121 if (table == &unw.kernel_table) {
2122 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2123 "no-can-do!\n", __FUNCTION__);
2124 return;
2125 }
2126
2127 spin_lock_irqsave(&unw.lock, flags);
2128 {
2129 /* first, delete the table: */
2130
2131 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2132 if (prev->next == table)
2133 break;
2134 if (!prev) {
2135 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2136 __FUNCTION__, (void *) table);
2137 spin_unlock_irqrestore(&unw.lock, flags);
2138 return;
2139 }
2140 prev->next = table->next;
2141 }
2142 spin_unlock_irqrestore(&unw.lock, flags);
2143
2144 /* next, remove hash table entries for this table */
2145
2146 	for (index = 0; index < UNW_HASH_SIZE; ++index) {
2147 tmp = unw.cache + unw.hash[index];
2148 if (unw.hash[index] >= UNW_CACHE_SIZE
2149 || tmp->ip < table->start || tmp->ip >= table->end)
2150 continue;
2151
2152 write_lock(&tmp->lock);
2153 {
2154 if (tmp->ip >= table->start && tmp->ip < table->end) {
2155 unw.hash[index] = tmp->coll_chain;
2156 tmp->ip = 0;
2157 }
2158 }
2159 write_unlock(&tmp->lock);
2160 }
2161
2162 kfree(table);
2163}
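/*
 * Example (hypothetical, for illustration only): a load-module would pair the
 * two calls above, registering its unwind section at load time and keeping
 * the opaque handle so the table can be removed again at unload time.  The
 * structure and helper names below are assumptions, not kernel API.
 */
struct example_unwind_reg {
	void *handle;
};

static int
example_register_unwind (struct example_unwind_reg *r, const char *name,
			 unsigned long segbase, unsigned long gp,
			 const void *ustart, const void *uend)
{
	r->handle = unw_add_unwind_table(name, segbase, gp, ustart, uend);
	return r->handle ? 0 : -1;
}

static void
example_unregister_unwind (struct example_unwind_reg *r)
{
	if (r->handle)
		unw_remove_unwind_table(r->handle);
	r->handle = NULL;
}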
2164
2165static int __init
2166create_gate_table (void)
2167{
2168 const struct unw_table_entry *entry, *start, *end;
2169 unsigned long *lp, segbase = GATE_ADDR;
2170 size_t info_size, size;
2171 char *info;
2172 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2173 int i;
2174
2175 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2176 if (phdr->p_type == PT_IA_64_UNWIND) {
2177 punw = phdr;
2178 break;
2179 }
2180
2181 if (!punw) {
2182 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2183 return 0;
2184 }
2185
2186 start = (const struct unw_table_entry *) punw->p_vaddr;
2187 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2188 size = 0;
2189
2190 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2191
2192 for (entry = start; entry < end; ++entry)
2193 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2194 size += 8; /* reserve space for "end of table" marker */
2195
2196 unw.gate_table = kmalloc(size, GFP_KERNEL);
2197 if (!unw.gate_table) {
2198 unw.gate_table_size = 0;
2199 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2200 return 0;
2201 }
2202 unw.gate_table_size = size;
2203
2204 lp = unw.gate_table;
2205 info = (char *) unw.gate_table + size;
2206
2207 for (entry = start; entry < end; ++entry, lp += 3) {
2208 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2209 info -= info_size;
2210 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2211
2212 lp[0] = segbase + entry->start_offset; /* start */
2213 lp[1] = segbase + entry->end_offset; /* end */
2214 lp[2] = info - (char *) unw.gate_table; /* info */
2215 }
2216 *lp = 0; /* end-of-table marker */
2217 return 0;
2218}
2219
2220__initcall(create_gate_table);
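/*
 * For illustration only: layout of the flattened copy built above, assuming a
 * gate DSO with two functions.  Table entries grow from the start of the
 * buffer, the per-function unwind info blobs are packed from the end, and a
 * single zero word terminates the table:
 *
 *   [ start0 end0 info_off0 ][ start1 end1 info_off1 ][ 0 ][ info1 ][ info0 ]
 *
 * Each info_offN is relative to the start of unw.gate_table, matching the
 * format documented for sys_getunwind() below.
 */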
2221
2222void __init
2223unw_init (void)
2224{
2225 extern char __gp[];
2226 extern void unw_hash_index_t_is_too_narrow (void);
2227 long i, off;
2228
2229 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2230 unw_hash_index_t_is_too_narrow();
2231
b833961b 2232 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
1da177e4 2233 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
b833961b 2234 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
1da177e4 2235 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
b833961b 2236 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2237 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2238 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2239 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2240 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2241 unw.sw_off[unw.preg_index[i]] = off;
2242 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2243 unw.sw_off[unw.preg_index[i]] = off;
2244 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2245 unw.sw_off[unw.preg_index[i]] = off;
2246 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2247 unw.sw_off[unw.preg_index[i]] = off;
2248
2249 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2250 if (i > 0)
2251 unw.cache[i].lru_chain = (i - 1);
2252 unw.cache[i].coll_chain = -1;
2253 rwlock_init(&unw.cache[i].lock);
2254 }
2255 unw.lru_head = UNW_CACHE_SIZE - 1;
2256 unw.lru_tail = 0;
2257
2258 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2259 __start_unwind, __end_unwind);
2260}
2261
2262/*
2263 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2264 *
2265 * This system call has been deprecated. The new and improved way to get
2266 * at the kernel's unwind info is via the gate DSO. The address of the
2267 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2268 *
2269 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2270 *
2271 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2272 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2273 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2274 * unwind data.
2275 *
2276 * The first portion of the unwind data contains an unwind table and the rest contains the
2277 * associated unwind info (in no particular order). The unwind table consists of a table
2278 * of entries of the form:
2279 *
2280 * u64 start; (64-bit address of start of function)
2281 * u64 end; (64-bit address of end of function)
2282 * u64 info; (BUF-relative offset to unwind info)
2283 *
2284 * The end of the unwind table is indicated by an entry with a START address of zero.
2285 *
2286 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2287 * on the format of the unwind info.
2288 *
2289 * ERRORS
2290 * EFAULT BUF points outside your accessible address space.
2291 */
2292asmlinkage long
2293sys_getunwind (void __user *buf, size_t buf_size)
2294{
2295 if (buf && buf_size >= unw.gate_table_size)
2296 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2297 return -EFAULT;
2298 return unw.gate_table_size;
2299}
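/*
 * Example (hypothetical, for illustration only): how a user-level program
 * could walk the buffer returned by this system call.  Each table entry is
 * three u64 values (start, end, BUF-relative info offset) and a zero start
 * address terminates the table.  The getunwind() wrapper is an assumption;
 * a program might equally invoke the raw system call directly.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

extern long getunwind (void *buf, size_t buf_size);	/* assumed user-level wrapper */

static void
example_dump_unwind_table (void)
{
	long size = getunwind(NULL, 0);		/* query the required size */
	uint64_t *buf, *lp;

	if (size <= 0)
		return;
	buf = malloc(size);
	if (!buf)
		return;
	if (getunwind(buf, size) != size) {
		free(buf);
		return;
	}
	for (lp = buf; lp[0] != 0; lp += 3)
		printf("func [0x%016llx-0x%016llx), info at offset 0x%llx\n",
		       (unsigned long long) lp[0], (unsigned long long) lp[1],
		       (unsigned long long) lp[2]);
	free(buf);
}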