/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/lmb.h>
#include <linux/cpu.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

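/* Set of cpus the master has released to run: a freshly booted
 * secondary spins in smp_callin() until its bit shows up here
 * (set by __cpu_up()).
 */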
static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i)
                seq_printf(m,
                           "Cpu%dClkTck\t: %016lx\n",
                           i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __cpuinit smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        __local_per_cpu_offset = __per_cpu_offset(cpuid);

        if (tlb_type == hypervisor)
                sun4v_ktsb_register();

        __flush_tlb_all();

        setup_sparc64_timer();

        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();

        local_irq_enable();

        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush %%g6" : : : "memory");

        /* Clear this or we will die instantly when we
         * schedule back to this idler...
         */
        current_thread_info()->new_child = 0;

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* inform the notifiers about the new cpu */
        notify_cpu_starting(cpuid);

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rmb();

        ipi_call_lock();
        cpu_set(cpuid, cpu_online_map);
        ipi_call_unlock();

        /* idle thread is expected to have preempt disabled */
        preempt_disable();
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

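/* Sample the master/slave tick pair NUM_ITERS times and keep the
 * pair with the smallest round-trip time.  The return value is the
 * offset of the slave's tick midpoint from the master's sample;
 * the caller feeds the negated result to tick_ops->add_tick().
 */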
static inline long get_delta (long *rt, long *master)
{
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
        unsigned long i;

        for (i = 0; i < NUM_ITERS; i++) {
                t0 = tick_ops->get_tick();
                go[MASTER] = 1;
                membar_storeload();
                while (!(tm = go[SLAVE]))
                        rmb();
                go[SLAVE] = 0;
                wmb();
                t1 = tick_ops->get_tick();

                if (t1 - t0 < best_t1 - best_t0)
                        best_t0 = t0, best_t1 = t1, best_tm = tm;
        }

        *rt = best_t1 - best_t0;
        *master = best_tm - best_t0;

        /* average best_t0 and best_t1 without overflow: */
        tcenter = (best_t0/2 + best_t1/2);
        if (best_t0 % 2 + best_t1 % 2 == 2)
                tcenter++;
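        /* e.g. best_t0 = 7, best_t1 = 9: 7/2 + 9/2 = 7 and both
         * remainders are 1, so tcenter ends up 8, the exact midpoint,
         * computed without forming the possibly-overflowing sum 16.
         */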
        return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
        struct {
                long rt;	/* roundtrip time */
                long master;	/* master's timestamp */
                long diff;	/* difference between midpoint and master's timestamp */
                long lat;	/* estimate of itc adjustment latency */
        } t[NUM_ROUNDS];
#endif

        go[MASTER] = 1;

        while (go[MASTER])
                rmb();

        local_irq_save(flags);
        {
                for (i = 0; i < NUM_ROUNDS; i++) {
                        delta = get_delta(&rt, &master_time_stamp);
                        if (delta == 0) {
                                done = 1;	/* let's lock on to this... */
                                bound = rt;
                        }

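                        /* Not locked on yet: step the tick by the negated
                         * offset plus a smoothed (divide-by-4) latency
                         * correction, per the ia64 algorithm this code is
                         * borrowed from.
                         */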
                        if (!done) {
                                if (i > 0) {
                                        adjust_latency += -delta;
                                        adj = -delta + adjust_latency/4;
                                } else
                                        adj = -delta;

                                tick_ops->add_tick(adj);
                        }
#if DEBUG_TICK_SYNC
                        t[i].rt = rt;
                        t[i].master = master_time_stamp;
                        t[i].diff = delta;
                        t[i].lat = adjust_latency/4;
#endif
                }
        }
        local_irq_restore(flags);

#if DEBUG_TICK_SYNC
        for (i = 0; i < NUM_ROUNDS; i++)
                printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

        printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
               "(last diff %ld cycles, maxerr %lu cycles)\n",
               smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

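/* Master side of tick synchronization: kick CPU into its client
 * loop, then feed it NUM_ROUNDS*NUM_ITERS tick samples through the
 * go[] handshake, serialized by itc_sync_lock.
 */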
static void smp_synchronize_one_tick(int cpu)
{
        unsigned long flags, i;

        go[MASTER] = 0;

        smp_start_sync_tick_client(cpu);

        /* wait for client to be ready */
        while (!go[MASTER])
                rmb();

        /* now let the client proceed into his loop */
        go[MASTER] = 0;
        membar_storeload();

        spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
                                rmb();
                        go[MASTER] = 0;
                        wmb();
                        go[SLAVE] = tick_ops->get_tick();
                        membar_storeload();
                }
        }
        spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
        unsigned long val = (unsigned long) p;

        return kern_base + (val - KERNBASE);
}

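/* Boot a cpu via the LDOM hypervisor: build an hvtramp_descr
 * describing the kernel image's locked TLB mappings and the cpu's
 * fault status area, then point sun4v_cpu_start() at the hvtramp
 * trampoline with that descriptor.
 */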
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
        extern unsigned long sparc64_ttable_tl0;
        extern unsigned long kern_locked_tte_data;
        struct hvtramp_descr *hdesc;
        unsigned long trampoline_ra;
        struct trap_per_cpu *tb;
        u64 tte_vaddr, tte_data;
        unsigned long hv_err;
        int i;

        hdesc = kzalloc(sizeof(*hdesc) +
                        (sizeof(struct hvtramp_mapping) *
                         num_kernel_image_mappings - 1),
                        GFP_KERNEL);
        if (!hdesc) {
                printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
                       "hvtramp_descr.\n");
                return;
        }

        hdesc->cpu = cpu;
        hdesc->num_mappings = num_kernel_image_mappings;

        tb = &trap_block[cpu];
        tb->hdesc = hdesc;

        hdesc->fault_info_va = (unsigned long) &tb->fault_info;
        hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

        hdesc->thread_reg = thread_reg;

        tte_vaddr = (unsigned long) KERNBASE;
        tte_data = kern_locked_tte_data;

        for (i = 0; i < hdesc->num_mappings; i++) {
                hdesc->maps[i].vaddr = tte_vaddr;
                hdesc->maps[i].tte = tte_data;
                tte_vaddr += 0x400000;
                tte_data += 0x400000;
        }

        trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

        hv_err = sun4v_cpu_start(cpu, trampoline_ra,
                                 kimage_addr_to_ra(&sparc64_ttable_tl0),
                                 __pa(hdesc));
        if (hv_err)
                printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
                       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
        struct trap_per_cpu *tb = &trap_block[cpu];
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
        int timeout, ret;

        p = fork_idle(cpu);
        if (IS_ERR(p))
                return PTR_ERR(p);
        callin_flag = 0;
        cpu_new_thread = task_thread_info(p);

        if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
                if (ldom_domaining_enabled)
                        ldom_startcpu_cpuid(cpu,
                                            (unsigned long) cpu_new_thread);
                else
#endif
                        prom_startcpu_cpuid(cpu, entry, cookie);
        } else {
                struct device_node *dp = of_find_node_by_cpuid(cpu);

                prom_startcpu(dp->node, entry, cookie);
        }

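        /* Wait up to ~5 seconds (50000 iterations of 100us) for the
         * new cpu to set callin_flag from smp_callin().
         */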
        for (timeout = 0; timeout < 50000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }

        if (callin_flag) {
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;

        if (tb->hdesc) {
                kfree(tb->hdesc);
                tb->hdesc = NULL;
        }

        return ret;
}

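/* Deliver one mondo vector (data0/data1/data2) to a single cpu via
 * the Spitfire UDB interrupt dispatch registers, busy-waiting on the
 * dispatch status and retrying after a short delay if the target
 * NACKs us.
 */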
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
        u64 result, target;
        int stuck, tmp;

        if (this_is_starfire) {
                /* map to real upaid */
                cpu = (((cpu & 0x3c) << 1) |
                        ((cpu & 0x40) >> 4) |
                        (cpu & 0x3));
        }

        target = (cpu << 14) | 0x70;
again:
        /* Ok, this is the real Spitfire Errata #54.
         * One must read back from a UDB internal register
         * after writes to the UDB interrupt dispatch, but
         * before the membar Sync for that write.
         * So we use the high UDB control register (ASI 0x7f,
         * ADDR 0x20) for the dummy read. -DaveM
         */
        tmp = 0x40;
        __asm__ __volatile__(
        "wrpr %1, %2, %%pstate\n\t"
        "stxa %4, [%0] %3\n\t"
        "stxa %5, [%0+%8] %3\n\t"
        "add %0, %8, %0\n\t"
        "stxa %6, [%0+%8] %3\n\t"
        "membar #Sync\n\t"
        "stxa %%g0, [%7] %3\n\t"
        "membar #Sync\n\t"
        "mov 0x20, %%g1\n\t"
        "ldxa [%%g1] 0x7f, %%g0\n\t"
        "membar #Sync"
        : "=r" (tmp)
        : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
          "r" (data0), "r" (data1), "r" (data2), "r" (target),
          "r" (0x10), "0" (tmp)
        : "g1");

        /* NOTE: PSTATE_IE is still clear. */
        stuck = 100000;
        do {
                __asm__ __volatile__("ldxa [%%g0] %1, %0"
                        : "=r" (result)
                        : "i" (ASI_INTR_DISPATCH_STAT));
                if (result == 0) {
                        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                             : : "r" (pstate));
                        return;
                }
                stuck -= 1;
                if (stuck == 0)
                        break;
        } while (result & 0x1);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));
        if (stuck == 0) {
                printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                       smp_processor_id(), result);
        } else {
                udelay(2);
                goto again;
        }
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
        u64 *mondo, data0, data1, data2;
        u16 *cpu_list;
        u64 pstate;
        int i;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        cpu_list = __va(tb->cpu_list_pa);
        mondo = __va(tb->cpu_mondo_block_pa);
        data0 = mondo[0];
        data1 = mondo[1];
        data2 = mondo[2];
        for (i = 0; i < cnt; i++)
                spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However, we do
 * take advantage of the new pipelining feature (i.e. dispatch to
 * multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
        int nack_busy_id, is_jbus, need_more;
        u64 *mondo, pstate, ver, busy_mask;
        u16 *cpu_list;

        cpu_list = __va(tb->cpu_list_pa);
        mondo = __va(tb->cpu_mondo_block_pa);

        /* Unfortunately, someone at Sun had the brilliant idea to make the
         * busy/nack fields hard-coded by ITID number for this Ultra-III
         * derivative processor.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        is_jbus = ((ver >> 32) == __JALAPENO_ID ||
                   (ver >> 32) == __SERRANO_ID);

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
        need_more = 0;
        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));

        /* Setup the dispatch data registers. */
        __asm__ __volatile__("stxa %0, [%3] %6\n\t"
                             "stxa %1, [%4] %6\n\t"
                             "stxa %2, [%5] %6\n\t"
                             "membar #Sync\n\t"
                             : /* no outputs */
                             : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
                               "r" (0x40), "r" (0x50), "r" (0x60),
                               "i" (ASI_INTR_W));

        nack_busy_id = 0;
        busy_mask = 0;
        {
                int i;

                for (i = 0; i < cnt; i++) {
                        u64 target, nr;

                        nr = cpu_list[i];
                        if (nr == 0xffff)
                                continue;

                        target = (nr << 14) | 0x70;
                        if (is_jbus) {
                                busy_mask |= (0x1UL << (nr * 2));
                        } else {
                                target |= (nack_busy_id << 24);
                                busy_mask |= (0x1UL <<
                                              (nack_busy_id * 2));
                        }
                        __asm__ __volatile__(
                                "stxa %%g0, [%0] %1\n\t"
                                "membar #Sync\n\t"
                                : /* no outputs */
                                : "r" (target), "i" (ASI_INTR_W));
                        nack_busy_id++;
                        if (nack_busy_id == 32) {
                                need_more = 1;
                                break;
                        }
                }
        }

        /* Now, poll for completion. */
        {
                u64 dispatch_stat, nack_mask;
                long stuck;

                stuck = 100000 * nack_busy_id;
                nack_mask = busy_mask << 1;
                do {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
                        if (!(dispatch_stat & (busy_mask | nack_mask))) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
                                if (unlikely(need_more)) {
                                        int i, this_cnt = 0;
                                        for (i = 0; i < cnt; i++) {
                                                if (cpu_list[i] == 0xffff)
                                                        continue;
                                                cpu_list[i] = 0xffff;
                                                this_cnt++;
                                                if (this_cnt == 32)
                                                        break;
                                        }
                                        goto retry;
                                }
                                return;
                        }
                        if (!--stuck)
                                break;
                } while (dispatch_stat & busy_mask);

                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));

                if (dispatch_stat & busy_mask) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
                        printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;

                        /* Delay some random time with interrupts enabled
                         * to prevent deadlock.
                         */
                        udelay(2 * nack_busy_id);

                        /* Clear out the mask bits for cpus which did not
                         * NACK us.
                         */
                        for (i = 0; i < cnt; i++) {
                                u64 check_mask, nr;

                                nr = cpu_list[i];
                                if (nr == 0xffff)
                                        continue;

                                if (is_jbus)
                                        check_mask = (0x2UL << (2*nr));
                                else
                                        check_mask = (0x2UL <<
                                                      this_busy_nack);
                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_list[i] = 0xffff;
                                this_busy_nack += 2;
                                if (this_busy_nack == 64)
                                        break;
                        }

                        goto retry;
                }
        }
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
        int retries, this_cpu, prev_sent, i, saw_cpu_error;
        unsigned long status;
        u16 *cpu_list;

        this_cpu = smp_processor_id();

        cpu_list = __va(tb->cpu_list_pa);

        saw_cpu_error = 0;
        retries = 0;
        prev_sent = 0;
        do {
                int forward_progress, n_sent;

                status = sun4v_cpu_mondo_send(cnt,
                                              tb->cpu_list_pa,
                                              tb->cpu_mondo_block_pa);

                /* HV_EOK means all cpus received the xcall, we're done. */
                if (likely(status == HV_EOK))
                        break;

                /* First, see if we made any forward progress.
                 *
                 * The hypervisor indicates successful sends by setting
                 * cpu list entries to the value 0xffff.
                 */
                n_sent = 0;
                for (i = 0; i < cnt; i++) {
                        if (likely(cpu_list[i] == 0xffff))
                                n_sent++;
                }

                forward_progress = 0;
                if (n_sent > prev_sent)
                        forward_progress = 1;

                prev_sent = n_sent;

                /* If we get a HV_ECPUERROR, then one or more of the cpus
                 * in the list are in error state.  Use the cpu_state()
                 * hypervisor call to find out which cpus are in error state.
                 */
                if (unlikely(status == HV_ECPUERROR)) {
                        for (i = 0; i < cnt; i++) {
                                long err;
                                u16 cpu;

                                cpu = cpu_list[i];
                                if (cpu == 0xffff)
                                        continue;

                                err = sun4v_cpu_state(cpu);
                                if (err == HV_CPU_STATE_ERROR) {
                                        saw_cpu_error = (cpu + 1);
                                        cpu_list[i] = 0xffff;
                                }
                        }
                } else if (unlikely(status != HV_EWOULDBLOCK))
                        goto fatal_mondo_error;

                /* Don't bother rewriting the CPU list, just leave the
                 * 0xffff and non-0xffff entries in there and the
                 * hypervisor will do the right thing.
                 *
                 * Only advance timeout state if we didn't make any
                 * forward progress.
                 */
                if (unlikely(!forward_progress)) {
                        if (unlikely(++retries > 10000))
                                goto fatal_mondo_timeout;

                        /* Delay a little bit to let other cpus catch up
                         * on their cpu mondo queue work.
                         */
                        udelay(2 * cnt);
                }
        } while (1);

        if (unlikely(saw_cpu_error))
                goto fatal_mondo_cpu_error;

        return;

fatal_mondo_cpu_error:
        printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
               "(including %d) were in error state\n",
               this_cpu, saw_cpu_error - 1);
        return;

fatal_mondo_timeout:
        printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
               "progress after %d retries.\n",
               this_cpu, retries);
        goto dump_cpu_list_and_out;

fatal_mondo_error:
        printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
               this_cpu, status);
        printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
               "mondo_block_pa(%lx)\n",
               this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
        printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
        for (i = 0; i < cnt; i++)
                printk("%u ", cpu_list[i]);
        printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

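/* Common front end: build this cpu's mondo data block and cpu list
 * with interrupts disabled, then hand delivery to the chip-specific
 * routine chosen in smp_setup_processor_id().
 */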
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
        struct trap_per_cpu *tb;
        int this_cpu, i, cnt;
        unsigned long flags;
        u16 *cpu_list;
        u64 *mondo;

        /* We have to do this whole thing with interrupts fully disabled.
         * Otherwise if we send an xcall from interrupt context it will
         * corrupt both our mondo block and cpu list state.
         *
         * One consequence of this is that we cannot use timeout mechanisms
         * that depend upon interrupts being delivered locally.  So, for
         * example, we cannot sample jiffies and expect it to advance.
         *
         * Fortunately, udelay() uses %stick/%tick so we can use that.
         */
        local_irq_save(flags);

        this_cpu = smp_processor_id();
        tb = &trap_block[this_cpu];

        mondo = __va(tb->cpu_mondo_block_pa);
        mondo[0] = data0;
        mondo[1] = data1;
        mondo[2] = data2;
        wmb();

        cpu_list = __va(tb->cpu_list_pa);

        /* Setup the initial cpu list. */
        cnt = 0;
        for_each_cpu_mask_nr(i, *mask) {
                if (i == this_cpu || !cpu_online(i))
                        continue;
                cpu_list[cnt++] = i;
        }

        if (cnt)
                xcall_deliver_impl(tb, cnt);

        local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in mask, except
 * self.  Really, there are only two cases currently:
 * "&cpu_online_map" and "&mm->cpu_vm_mask".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
        u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

        xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
        smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
        xcall_deliver((u64) &xcall_sync_tick, 0, 0,
                      &cpumask_of_cpu(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi(cpumask_t mask)
{
        xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
        xcall_deliver((u64) &xcall_call_function_single, 0, 0,
                      &cpumask_of_cpu(cpu));
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
        generic_smp_call_function_interrupt();
}

void smp_call_function_single_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
        generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
        struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
        struct mm_struct *mm = info;

        /* It is not valid to test "current->active_mm == mm" here.
         *
         * The value of "current" is not changed atomically with
         * switch_mm().  But that's OK, we just need to check the
         * current cpu's trap block PGD physical address.
         */
        if (tp->pgd_paddr == __pa(mm->pgd))
                tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
        smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}

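/* sun4v (hypervisor) chips take the early exit below: their D-caches
 * apparently need no software flushing of this kind, so only
 * spitfire and cheetah class cpus ever send the xcall.
 */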
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

        this_cpu = get_cpu();

        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
                void *pg_addr = page_address(page);
                u64 data0 = 0;

                if (tlb_type == spitfire) {
                        data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                        if (page_mapping(page) != NULL)
                                data0 |= ((u64)1 << 32);
                } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                        data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
                }
                if (data0) {
                        xcall_deliver(data0, __pa(pg_addr),
                                      (u64) pg_addr, &cpumask_of_cpu(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
                        atomic_inc(&dcpage_flushes_xcall);
#endif
                }
        }

        put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
        void *pg_addr;
        int this_cpu;
        u64 data0;

        if (tlb_type == hypervisor)
                return;

        this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif
        data0 = 0;
        pg_addr = page_address(page);
        if (tlb_type == spitfire) {
                data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                if (page_mapping(page) != NULL)
                        data0 |= ((u64)1 << 32);
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
        }
        if (data0) {
                xcall_deliver(data0, __pa(pg_addr),
                              (u64) pg_addr, &cpu_online_map);
#ifdef CONFIG_DEBUG_DCFLUSH
                atomic_inc(&dcpage_flushes_xcall);
#endif
        }
        __local_flush_dcache_page(page);

        put_cpu();
}

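/* Softint handler for the cross call sent by
 * smp_new_mmu_context_version().  If the current address space is
 * running with a stale context, allocate a fresh one and reload the
 * secondary context register.
 */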
void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
        struct mm_struct *mm;
        unsigned long flags;

        clear_softint(1 << irq);

        /* See if we need to allocate a new TLB context because
         * the version of the one we are using is now out of date.
         */
        mm = current->active_mm;
        if (unlikely(!mm || (mm == &init_mm)))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);

        if (unlikely(!CTX_VALID(mm->context)))
                get_new_mmu_context(mm);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        load_secondary_context(mm);
        __flush_tlb_mm(CTX_HWBITS(mm->context),
                       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
        smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
        smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
        smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (i.e. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (atomic_read(&mm->mm_users) == 1) {
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
        }

        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
                              &mm->cpu_vm_mask);

local_flush_and_out:
        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);

        put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();

        if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,
                                      ctx, nr, (unsigned long) vaddrs,
                                      &mm->cpu_vm_mask);

        __flush_tlb_pending(ctx, nr, vaddrs);

        put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        start &= PAGE_MASK;
        end = PAGE_ALIGN(end);
        if (start != end) {
                smp_cross_call(&xcall_flush_tlb_kernel_range,
                               0, start, end);

                __flush_tlb_kernel_range(start, end);
        }
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

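/* The first captor raises penguins_are_doing_time and cross calls
 * xcall_capture; every other cpu then spins in
 * smp_penguin_jailcell() until smp_release() drops the flag.
 * Nesting is handled by smp_capture_depth.
 */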
void smp_capture(void)
{
        int result = atomic_add_ret(1, &smp_capture_depth);

        if (result == 1) {
                int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Sending penguins to jail...",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 1;
                membar_storestore_loadstore();
                atomic_inc(&smp_capture_registry);
                smp_cross_call(&xcall_capture, 0, 0, 0);
                while (atomic_read(&smp_capture_registry) != ncpus)
                        rmb();
#ifdef CAPTURE_DEBUG
                printk("done\n");
#endif
        }
}

void smp_release(void)
{
        if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Giving pardon to "
                       "imprisoned penguins\n",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 0;
                membar_storeload_storestore();
                atomic_dec(&smp_capture_registry);
        }
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);

        preempt_disable();

        __asm__ __volatile__("flushw");
        prom_world(1);
        atomic_inc(&smp_capture_registry);
        membar_storeload_storestore();
        while (penguins_are_doing_time)
                rmb();
        atomic_dec(&smp_capture_registry);
        prom_world(0);

        preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
        if (tlb_type == spitfire)
                xcall_deliver_impl = spitfire_xcall_deliver;
        else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                xcall_deliver_impl = cheetah_xcall_deliver;
        else
                xcall_deliver_impl = hypervisor_xcall_deliver;
}

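/* Build the topology maps from the machine description data: cpus
 * sharing a core_id land in each other's cpu_core_map, cpus sharing
 * a proc_id in each other's cpu_sibling_map.
 */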
void __devinit smp_fill_in_sib_core_maps(void)
{
        unsigned int i;

        for_each_present_cpu(i) {
                unsigned int j;

                cpus_clear(cpu_core_map[i]);
                if (cpu_data(i).core_id == 0) {
                        cpu_set(i, cpu_core_map[i]);
                        continue;
                }

                for_each_present_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
                                cpu_set(j, cpu_core_map[i]);
                }
        }

        for_each_present_cpu(i) {
                unsigned int j;

                cpus_clear(per_cpu(cpu_sibling_map, i));
                if (cpu_data(i).proc_id == -1) {
                        cpu_set(i, per_cpu(cpu_sibling_map, i));
                        continue;
                }

                for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
                                cpu_set(j, per_cpu(cpu_sibling_map, i));
                }
        }
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        int ret = smp_boot_one_cpu(cpu);

        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_isset(cpu, cpu_online_map))
                        mb();
                if (!cpu_isset(cpu, cpu_online_map)) {
                        ret = -ENODEV;
                } else {
                        /* On SUN4V, writes to %tick and %stick are
                         * not allowed.
                         */
                        if (tlb_type != hypervisor)
                                smp_synchronize_one_tick(cpu);
                }
        }
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
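/* Final resting loop for an offlined cpu: tear down our sun4v
 * mondo/error queues when running under the hypervisor, leave
 * smp_commenced_mask, then spin with interrupts disabled until
 * __cpu_die() (or the hypervisor) takes us away.
 */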
void cpu_play_dead(void)
{
        int cpu = smp_processor_id();
        unsigned long pstate;

        idle_task_exit();

        if (tlb_type == hypervisor) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
                                tb->cpu_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
                                tb->dev_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
                                tb->resum_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
                                tb->nonresum_mondo_pa, 0);
        }

        cpu_clear(cpu, smp_commenced_mask);
        membar_safe("#Sync");

        local_irq_disable();

        __asm__ __volatile__(
                "rdpr %%pstate, %0\n\t"
                "wrpr %0, %1, %%pstate"
                : "=r" (pstate)
                : "i" (PSTATE_IE));

        while (1)
                barrier();
}

int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        cpuinfo_sparc *c;
        int i;

        for_each_cpu_mask(i, cpu_core_map[cpu])
                cpu_clear(cpu, cpu_core_map[i]);
        cpus_clear(cpu_core_map[cpu]);

        for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));

        c = &cpu_data(cpu);

        c->core_id = 0;
        c->proc_id = -1;

        smp_wmb();

        /* Make sure no interrupts point to this cpu. */
        fixup_irqs();

        local_irq_enable();
        mdelay(1);
        local_irq_disable();

        ipi_call_lock();
        cpu_clear(cpu, cpu_online_map);
        ipi_call_unlock();

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (!cpu_isset(cpu, smp_commenced_mask))
                        break;
                msleep(100);
        }
        if (cpu_isset(cpu, smp_commenced_mask)) {
                printk(KERN_ERR "CPU %u didn't die...\n", cpu);
        } else {
#if defined(CONFIG_SUN_LDOMS)
                unsigned long hv_err;
                int limit = 100;

                do {
                        hv_err = sun4v_cpu_stop(cpu);
                        if (hv_err == HV_EOK) {
                                cpu_clear(cpu, cpu_present_map);
                                break;
                        }
                } while (--limit > 0);
                if (limit <= 0) {
                        printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
                               hv_err);
                }
#endif
        }
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
        xcall_deliver((u64) &xcall_receive_signal, 0, 0,
                      &cpumask_of_cpu(cpu));
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

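/* Allocate NR_CPUS copies of the initial per-cpu data, each copy
 * sized up to the next power of two so that a cpu's area sits at
 * __per_cpu_base + (cpu << __per_cpu_shift).  For example (numbers
 * illustrative only): with 8KB pages, a 72KB .data.percpu section
 * rounds up to 128KB and __per_cpu_shift becomes 17.
 */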
void __init real_setup_per_cpu_areas(void)
{
        unsigned long paddr, goal, size, i;
        char *ptr;

        /* Copy section for each CPU (we discard the original) */
        goal = PERCPU_ENOUGH_ROOM;

        __per_cpu_shift = PAGE_SHIFT;
        for (size = PAGE_SIZE; size < goal; size <<= 1UL)
                __per_cpu_shift++;

        paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
        if (!paddr) {
                prom_printf("Cannot allocate per-cpu memory.\n");
                prom_halt();
        }

        ptr = __va(paddr);
        __per_cpu_base = ptr - __per_cpu_start;

        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

        /* Setup %g5 for the boot cpu. */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}
1da177e4 1413}