/*
 *  arch/s390/kernel/smp.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

/* prototypes */

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */

struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;

static struct task_struct *current_set[NR_CPUS];

EXPORT_SYMBOL(cpu_online_map);

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);
extern void reipl_diag(void);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        atomic_inc(&call_data->started);
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}
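
/*
 * Note on the handshake above (editorial comment, not in the original
 * source): the caller publishes a call_data_struct under call_lock,
 * kicks the other cpus and then spins until "started" (and, if wait
 * was set, "finished") reaches the expected count. A minimal sketch
 * of the ordering the protocol relies on:
 *
 *      caller                          callee
 *      ------                          ------
 *      call_data = &data;
 *      smp_ext_bitcall_others(...);    do_call_function():
 *      while (started != cpus)           atomic_inc(&started);
 *              cpu_relax();              func(info);
 *                                        if (wait)
 *                                                atomic_inc(&finished);
 *
 * Because "started" is incremented before func() runs, without wait
 * the caller only knows the callees are about to execute func.
 */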

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                        int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (cpus <= 0)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ext_bitcall_others(ec_call_function);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
        spin_unlock(&call_lock);

        return 0;
}
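
/*
 * Illustrative usage (editorial sketch, not from the original source;
 * drain_local_counters is a hypothetical callback):
 *
 *      static void drain_local_counters(void *unused)
 *      {
 *              // runs on every *other* cpu, in interrupt context,
 *              // so it must be fast and must not sleep
 *      }
 *
 *      // from process context with interrupts enabled:
 *      smp_call_function(drain_local_counters, NULL, 0, 1);
 *
 * The callback runs on all cpus except the current one; call it
 * locally by hand if the current cpu must be included.
 */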

/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
                         int nonatomic, int wait, int cpu)
{
        struct call_data_struct data;
        int curr_cpu;

        if (!cpu_online(cpu))
                return -EINVAL;

        /* disable preemption for local function call */
        curr_cpu = get_cpu();

        if (curr_cpu == cpu) {
                /* direct call to function */
                func(info);
                put_cpu();
                return 0;
        }

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&call_lock);
        call_data = &data;
        smp_ext_bitcall(cpu, ec_call_function);

        /* Wait for response */
        while (atomic_read(&data.started) != 1)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != 1)
                        cpu_relax();

        spin_unlock_bh(&call_lock);
        put_cpu();
        return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
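
/*
 * Illustrative usage (editorial sketch, not from the original source;
 * read_remote_reg and struct reg_request are hypothetical):
 *
 *      struct reg_request req = { .reg = 5 };
 *
 *      // run read_remote_reg on cpu 2 and wait for it to finish:
 *      if (smp_call_function_on(read_remote_reg, &req, 0, 1, 2) < 0)
 *              return -ENODEV; // cpu 2 was not online
 *
 * Unlike smp_call_function(), this helper also handles the case where
 * the target is the current cpu by calling func directly under
 * get_cpu()/put_cpu().
 */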

static inline void do_send_stop(void)
{
        int cpu, rc;

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                do {
                        rc = signal_processor(cpu, sigp_stop);
                } while (rc == sigp_busy);
        }
}

static inline void do_store_status(void)
{
        int cpu, rc;

        /* store status of all processors in their lowcores (real 0) */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                do {
                        rc = signal_processor_p(
                                (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
                                sigp_store_status_at_address);
                } while (rc == sigp_busy);
        }
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
        /* write magic number to zero page (absolute 0) */
        lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

        /* stop other processors. */
        do_send_stop();

        /* store status of other processors. */
        do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
        int cpu;
        static atomic_t cpuid = ATOMIC_INIT(-1);

        if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
                signal_processor(smp_processor_id(), sigp_stop);

        /* Wait for all other cpus to enter stopped state */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                while (!smp_cpu_not_running(cpu))
                        cpu_relax();
        }

        /* Store status of other cpus. */
        do_store_status();

        /*
         * Finally call reipl. Because we waited for all other
         * cpus to enter this function we know that they do
         * not hold any s390irq-locks (the cpus have been
         * interrupted by an external interrupt and s390irq
         * locks are always held disabled).
         */
        reipl_diag();

        if (MACHINE_IS_VM)
                cpcmd("IPL", NULL, 0, NULL);
        else
                reipl(0x10000 | S390_lowcore.ipl_device);
}

void machine_restart_smp(char * __unused)
{
        on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
        unsigned long cr[16];

        __ctl_store(cr, 0, 15);
        cr[0] &= ~0xffff;
        cr[6] = 0;
        __ctl_load(cr, 0, 15);
        for (;;)
                enabled_wait();
}

static void do_machine_halt(void * __unused)
{
        static atomic_t cpuid = ATOMIC_INIT(-1);

        if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                        cpcmd(vmhalt_cmd, NULL, 0, NULL);
                signal_processor(smp_processor_id(),
                                 sigp_stop_and_store_status);
        }
        do_wait_for_stop();
}

void machine_halt_smp(void)
{
        on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
        static atomic_t cpuid = ATOMIC_INIT(-1);

        if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                        cpcmd(vmpoff_cmd, NULL, 0, NULL);
                signal_processor(smp_processor_id(),
                                 sigp_stop_and_store_status);
        }
        do_wait_for_stop();
}

void machine_power_off_smp(void)
{
        on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
        unsigned long bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);

        if (test_bit(ec_call_function, &bits))
                do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
        while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
                udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                /*
                 * Set signaling bit in lowcore of target cpu and kick it
                 */
                set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
                while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
                        udelay(10);
        }
}
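
/*
 * Editorial note on the mechanism above (not in the original source):
 * each request is a single bit in the target cpu's lowcore word
 * ext_call_fast, so several senders can post the same signal cheaply
 * and the receiver (do_ext_call_interrupt) collects all pending bits
 * with one xchg. A hedged sketch of how a new signal type would plug
 * in, assuming a hypothetical ec_dump bit in ec_bit_sig:
 *
 *      // sender:
 *      smp_ext_bitcall(cpu, ec_dump);
 *
 *      // receiver, in do_ext_call_interrupt():
 *      if (test_bit(ec_dump, &bits))
 *              do_dump();
 *
 * The sigp order only kicks the cpu; the bit carries the payload.
 */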

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
        local_flush_tlb();
}

void smp_ptlb_all(void)
{
        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
        __u16 start_ctl;
        __u16 end_ctl;
        unsigned long orvals[16];
        unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
        ec_creg_mask_parms *pp;
        unsigned long cregs[16];
        int i;

        pp = (ec_creg_mask_parms *) info;
        __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
        for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        parms.start_ctl = cr;
        parms.end_ctl = cr;
        parms.orvals[cr] = 1 << bit;
        parms.andvals[cr] = -1L;
        preempt_disable();
        smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        __ctl_set_bit(cr, bit);
        preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        parms.start_ctl = cr;
        parms.end_ctl = cr;
        parms.orvals[cr] = 0;
        parms.andvals[cr] = ~(1L << bit);
        preempt_disable();
        smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        __ctl_clear_bit(cr, bit);
        preempt_enable();
}
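
/*
 * Illustrative usage (editorial sketch, not from the original source):
 * the or/and mask pair lets one callback both set and clear bits. To
 * set bit 29 of control register 14 on every cpu, as done later in
 * smp_prepare_cpus() for the extended save area, one would call
 *
 *      smp_ctl_set_bit(14, 29);
 *
 * which builds parms with orvals[14] = 1 << 29 and andvals[14] = -1L,
 * runs smp_ctl_bit_callback() on the other cpus via smp_call_function()
 * and applies the same change locally with __ctl_set_bit().
 */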

/*
 * Let's check how many CPUs we have.
 */

void
__init smp_check_cpus(unsigned int max_cpus)
{
        int cpu, num_cpus;
        __u16 boot_cpu_addr;

        /*
         * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
         */

        boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
        current_thread_info()->cpu = 0;
        num_cpus = 1;
        for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
                if ((__u16) cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[num_cpus] = (__u16) cpu;
                if (signal_processor(num_cpus, sigp_sense) ==
                    sigp_not_operational)
                        continue;
                cpu_set(num_cpus, cpu_present_map);
                num_cpus++;
        }

        for (cpu = 1; cpu < max_cpus; cpu++)
                cpu_set(cpu, cpu_possible_map);

        printk("Detected %d CPUs\n", (int) num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        preempt_disable();
        /* init per CPU timer */
        init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
        init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
        /* Enable pfault pseudo page faults on this cpu. */
        if (MACHINE_IS_VM)
                pfault_init();
#endif
        /* Mark this cpu as online */
        cpu_set(smp_processor_id(), cpu_online_map);
        /* Switch on interrupts */
        local_irq_enable();
        /* Print info about this processor */
        print_cpu_info(&S390_lowcore.cpu_data);
        /* cpu_idle will call schedule for us */
        cpu_idle();
        return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /*
         * don't care about the psw and regs settings since we'll never
         * reschedule the forked task.
         */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
        unsigned long flags;
        int cpu;

        spin_lock_irqsave(&smp_reserve_lock, flags);
        /* Try to find an already reserved cpu. */
        for_each_cpu_mask(cpu, cpu_mask) {
                if (smp_cpu_reserved[cpu] != 0) {
                        smp_cpu_reserved[cpu]++;
                        /* Found one. */
                        goto out;
                }
        }
        /* Reserve a new cpu from cpu_mask. */
        for_each_cpu_mask(cpu, cpu_mask) {
                if (cpu_online(cpu)) {
                        smp_cpu_reserved[cpu]++;
                        goto out;
                }
        }
        cpu = -ENODEV;
out:
        spin_unlock_irqrestore(&smp_reserve_lock, flags);
        return cpu;
}

void
smp_put_cpu(int cpu)
{
        unsigned long flags;

        spin_lock_irqsave(&smp_reserve_lock, flags);
        smp_cpu_reserved[cpu]--;
        spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
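
/*
 * Illustrative usage (editorial sketch, not from the original source):
 * a reserved cpu cannot pass __cpu_disable(), so a caller that needs
 * some cpu to stay online can pin one like this:
 *
 *      int cpu = smp_get_cpu(CPU_MASK_ALL);
 *      if (cpu >= 0) {
 *              // ... cpu cannot be hot-unplugged here ...
 *              smp_put_cpu(cpu);
 *      }
 *
 * Reservations nest: the first loop in smp_get_cpu() prefers a cpu
 * that is already reserved, so repeated callers tend to share one cpu.
 */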

static inline int
cpu_stopped(int cpu)
{
        __u32 status;

        /* Check for stopped state */
        if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
            sigp_status_stored) {
                if (status & 0x40)
                        return 1;
        }
        return 0;
}
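
/*
 * Editorial note (not in the original source): sigp_sense stores a
 * status word when the order completes with the "status stored"
 * condition code; the 0x40 bit checked above is the one that flags
 * the addressed cpu as stopped. __cpu_up() below scans all 65536
 * possible cpu addresses with this helper to locate the stopped cpu
 * it is about to start.
 */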

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        struct _lowcore *cpu_lowcore;
        struct stack_frame *sf;
        sigp_ccode ccode;
        int curr_cpu;

        for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
                __cpu_logical_map[cpu] = (__u16) curr_cpu;
                if (cpu_stopped(cpu))
                        break;
        }

        if (!cpu_stopped(cpu))
                return -ENODEV;

        ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
                                   cpu, sigp_set_prefix);
        if (ccode) {
                printk("sigp_set_prefix failed for cpu %d "
                       "with condition code %d\n",
                       (int) cpu, (int) ccode);
                return -EIO;
        }

        idle = current_set[cpu];
        cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->kernel_stack = (unsigned long)
                idle->thread_info + (THREAD_SIZE);
        sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                     - sizeof(struct pt_regs)
                                     - sizeof(struct stack_frame));
        memset(sf, 0, sizeof(struct stack_frame));
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
        __asm__ __volatile__("stam  0,15,0(%0)"
                             : : "a" (&cpu_lowcore->access_regs_save_area)
                             : "memory");
        cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
        cpu_lowcore->current_task = (unsigned long) idle;
        cpu_lowcore->cpu_data.cpu_nr = cpu;
        eieio();
        signal_processor(cpu, sigp_restart);

        while (!cpu_online(cpu))
                cpu_relax();
        return 0;
}
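
/*
 * Editorial summary of the bring-up sequence above (not in the
 * original source): __cpu_up() first binds logical cpu "cpu" to a
 * stopped physical cpu address, installs that cpu's prefix page with
 * sigp_set_prefix, hand-crafts an initial kernel stack frame for the
 * idle task in the new lowcore, and finally issues sigp_restart. The
 * restarted cpu ends up in start_secondary(), which sets its bit in
 * cpu_online_map - the condition the loop at the end waits for.
 */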

int
__cpu_disable(void)
{
        unsigned long flags;
        ec_creg_mask_parms cr_parms;
        int cpu = smp_processor_id();

        spin_lock_irqsave(&smp_reserve_lock, flags);
        if (smp_cpu_reserved[cpu] != 0) {
                spin_unlock_irqrestore(&smp_reserve_lock, flags);
                return -EBUSY;
        }
        cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
        /* Disable pfault pseudo page faults on this cpu. */
        if (MACHINE_IS_VM)
                pfault_fini();
#endif

        /* disable all external interrupts */

        cr_parms.start_ctl = 0;
        cr_parms.end_ctl = 0;
        cr_parms.orvals[0] = 0;
        cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
                                1<<11 | 1<<10 | 1<< 6 | 1<< 4);
        smp_ctl_bit_callback(&cr_parms);

        /* disable all I/O interrupts */

        cr_parms.start_ctl = 6;
        cr_parms.end_ctl = 6;
        cr_parms.orvals[6] = 0;
        cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
                                1<<27 | 1<<26 | 1<<25 | 1<<24);
        smp_ctl_bit_callback(&cr_parms);

        /* disable most machine checks */

        cr_parms.start_ctl = 14;
        cr_parms.end_ctl = 14;
        cr_parms.orvals[14] = 0;
        cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
        smp_ctl_bit_callback(&cr_parms);

        spin_unlock_irqrestore(&smp_reserve_lock, flags);
        return 0;
}

void
__cpu_die(unsigned int cpu)
{
        /* Wait until target cpu is down */
        while (!smp_cpu_not_running(cpu))
                cpu_relax();
        printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
        idle_task_exit();
        signal_processor(smp_processor_id(), sigp_stop);
        BUG();
        for (;;);
}

/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned long stack;
        unsigned int cpu;
        int i;

        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        smp_check_cpus(max_cpus);
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
        /*
         * Initialize prefix pages and stacks for all possible cpus
         */
        print_cpu_info(&S390_lowcore.cpu_data);

        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_possible(i))
                        continue;
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL|GFP_DMA,
                                         sizeof(void*) == 8 ? 1 : 0);
                stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
                if (lowcore_ptr[i] == NULL || stack == 0ULL)
                        panic("smp_boot_cpus failed to allocate memory\n");

                *(lowcore_ptr[i]) = S390_lowcore;
                lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
                stack = __get_free_pages(GFP_KERNEL, 0);
                if (stack == 0ULL)
                        panic("smp_boot_cpus failed to allocate memory\n");
                lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
                if (MACHINE_HAS_IEEE) {
                        lowcore_ptr[i]->extended_save_area_addr =
                                (__u32) __get_free_pages(GFP_KERNEL, 0);
                        if (lowcore_ptr[i]->extended_save_area_addr == 0)
                                panic("smp_boot_cpus failed to "
                                      "allocate memory\n");
                }
#endif
        }
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                ctl_set_bit(14, 29); /* enable extended save area */
#endif
        set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

        for_each_cpu(cpu)
                if (cpu != smp_processor_id())
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);

        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_present_map);
        cpu_set(0, cpu_possible_map);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
        cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
        int cpu;
        int ret;

        for_each_cpu(cpu) {
                ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
                if (ret)
                        printk(KERN_WARNING "topology_init: register_cpu %d "
                               "failed (%d)\n", cpu, ret);
        }
        return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);