#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than the maximum architected
 * values, but should be sufficient for a while.
 */
#define IA64_NUM_PMC_REGS	64
#define IA64_NUM_PMD_REGS	64

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE is really a misnomer: it is actually the maximum user-space
 * address (plus one).  On IA-64, user space spans five regions of 2TB
 * each (assuming an 8KB page size), for a total of 10TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to represent 1000000000/itc_freq with
 * good accuracy, while being small enough that 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT
 * still fits in 64 bits (i.e., enough slack to represent 10 seconds worth
 * of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
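
/*
 * Worked example (illustrative, not part of the original header): at an ITC
 * frequency of 400 MHz,
 *
 *	nsec_per_cyc = (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / 400000000
 *	            ~= 2.5 << 30
 *
 * and a cycle count then converts to nanoseconds with one multiply and one
 * shift:
 *
 *	ns = (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *
 * Ten seconds at 400 MHz is 4*10^9 cycles, and 4*10^9 * (2.5 << 30) still
 * fits in 64 bits, which is exactly the slack the shift of 30 was chosen
 * to guarantee.
 */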

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/*
 * The processor status register (PSR) expressed as bitfields for more
 * efficient access (the corresponding IA64_PSR_* bit masks are defined
 * in <asm/kregs.h>):
 */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
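
/*
 * Usage sketch (illustrative): the bitfields overlay the raw 64-bit PSR
 * image, so a saved PSR can be inspected by casting, e.g. via the
 * ia64_psr() macro from <asm/ptrace.h>:
 *
 *	struct ia64_psr *psr = ia64_psr(regs);
 *	if (psr->ac)
 *		handle_alignment_checking();	(hypothetical handler)
 */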

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* total number of logical processors on this
				 * socket that were successfully booted */
	__u8 cores_per_socket;	/* cores per processor socket */
	__u8 threads_per_core;	/* threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently
 * executing CPU, much like "current" points to the per-task data of the
 * currently executing task.  Do not use the address of local_cpu_data, since
 * it will be different from cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
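
/*
 * Example (illustrative): reading the ITC frequency of the executing CPU
 * versus that of an arbitrary CPU:
 *
 *	unsigned long freq  = local_cpu_data->itc_freq;		(this CPU)
 *	unsigned long freq2 = cpu_data(1)->itc_freq;		(CPU 1)
 */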

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
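
/*
 * These hooks are wired up to the generic prctl() code: PR_SET_UNALIGN /
 * PR_GET_UNALIGN invoke the UAC macros above, and PR_SET_FPEMU /
 * PR_GET_FPEMU the FPEMU ones.  User-space sketch (illustrative):
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	(SIGBUS instead of
 *							 emulating the access)
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		(SIGFPE on fpswa faults)
 */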

#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE		(GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif

struct thread_struct {
	__u32 flags;		/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;		/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;		/* kernel stack pointer */
	__u64 map_base;		/* base address for get_unmapped_area() */
	__u64 task_size;	/* limit for task size */
	__u64 rbs_bot;		/* the base address for the RBS */
	int last_fph_cpu;	/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;		/* IA32 EFLAGS reg */
	__u64 fsr;		/* IA32 floating pt status reg */
	__u64 fcr;		/* IA32 floating pt control reg */
	__u64 fir;		/* IA32 fp except. instr. reg */
	__u64 fdr;		/* IA32 fp except. data reg */
	__u64 old_k1;		/* old value of ar.k1 */
	__u64 old_iob;		/* old IOBase value */
	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
	/* cached TLS descriptors: */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmcs[IA64_NUM_PMC_REGS];
	__u64 pmds[IA64_NUM_PMD_REGS];
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pmcs =			{0UL, },	\
				.pmds =			{0UL, },	\
				.pfm_context =		NULL,		\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!current->mm->dumpable)) {							\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread.  This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy state */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
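
/*
 * Sketch of the lazy-fph protocol these helpers implement (illustrative;
 * the real consumer is the disabled-FP-high fault handler in the kernel
 * proper, which also flushes the previous owner's state):
 *
 *	preempt_disable();
 *	if (!ia64_is_local_fpu_owner(current)) {
 *		if (current->thread.flags & IA64_THREAD_FPH_VALID)
 *			ia64_load_fpu(current->thread.fph);
 *		else
 *			ia64_init_fpu();
 *		ia64_set_local_fpu_owner(current);
 *	}
 *	preempt_enable();
 */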

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_d();
}
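
/*
 * Typical pairing (illustrative): bracket a short sequence that must run
 * with interrupts and interruption collection disabled:
 *
 *	unsigned long psr = ia64_clear_ic();
 *	...					(critical section)
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 */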

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}
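
/*
 * Usage sketch (illustrative): pinning a kernel mapping with an instruction
 * translation register, in the style of the EFI/PAL mapping code.  CR.IFA
 * and CR.ITIR are live across the insert, so interruption collection must
 * be off:
 *
 *	psr = ia64_clear_ic();
 *	ia64_itr(0x1, IA64_TR_PALCODE, vaddr, pte, IA64_GRANULE_SHIFT);
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 *
 * target_mask selects the unit: 0x1 = instruction, 0x2 = data, 0x3 = both.
 */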

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}
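
/*
 * Example (illustrative): test whether external interrupt vector 0xd0 is
 * still pending delivery:
 *
 *	if (ia64_get_irr(0xd0))
 *		handle_pending_vector();	(hypothetical caller)
 */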

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}


/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
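
/*
 * Worked example (illustrative): a spill to address 0xe000000000010128
 * maps to double-word 0x25 of the 512-byte UNAT window:
 *
 *	ia64_unat_pos((void *) 0xe000000000010128UL)
 *		== (0x128 >> 3) & 0x3f  ==  0x25
 *
 * so ia64_set_unat() deposits the NaT bit at position 0x25 of *unat.
 */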

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
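
/*
 * Example (illustrative): these are ordinary bitwise rotations, e.g.
 *
 *	ia64_rotr(0x1UL, 8) == 0x0100000000000000UL
 *	ia64_rotl(0x1UL, 8) == 0x0000000000000100UL
 *
 * Note that n must be in 1..63: for n == 0, the (w << (64 - n)) term
 * shifts by 64, which C leaves undefined.
 */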

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity-mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE		L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
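
/*
 * Usage sketch (illustrative): prefetch a buffer ahead of a walk in
 * PREFETCH_STRIDE steps, much as the generic prefetch_range() helper in
 * <linux/prefetch.h> does:
 *
 *	for (p = buf; p < buf + len; p += PREFETCH_STRIDE)
 *		prefetch(p);
 */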

extern unsigned long boot_option_idle_override;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */