[S390] smp: introduce LC_ORDER and simplify lowcore handling
/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one, which is what causes all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);

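/*
 * Use sigp sense to find out whether a cpu is stopped. In the status
 * word stored by sigp sense, bit 0x40 indicates the stopped state and
 * bit 0x10 the check-stop state (bit values per the s390 architecture),
 * hence the 0x50 mask below.
 */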
static int cpu_stopped(int cpu)
{
        __u32 status;

        switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
        case sigp_order_code_accepted:
        case sigp_status_stored:
                /* Check for stopped and check stop state */
                if (status & 0x50)
                        return 1;
                break;
        default:
                break;
        }
        return 0;
}

void smp_send_stop(void)
{
        int cpu, rc;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
        trace_hardirqs_off();

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                do {
                        rc = signal_processor(cpu, sigp_stop);
                } while (rc == sigp_busy);

                while (!cpu_stopped(cpu))
                        cpu_relax();
        }
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
        unsigned long bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);

        if (test_bit(ec_call_function, &bits))
                generic_smp_call_function_interrupt();

        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
}

/*
 * Send an emergency-signal sigp to another cpu and return without
 * waiting for its completion. The bit set in the target's lowcore
 * tells the receiving cpu which action is requested.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
        while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
                udelay(10);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
        int cpu;

        for_each_cpu_mask(cpu, mask)
                smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_ext_bitcall(cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this callback flushes the local TLB; smp_ptlb_all() runs it on
 * every CPU to implement a global 'purge tlb'.
 */
static void smp_ptlb_callback(void *info)
{
        __tlb_flush_local();
}

void smp_ptlb_all(void)
{
        on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
        unsigned long orvals[16];
        unsigned long andvals[16];
};
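
/*
 * For each control register i the callback below computes
 * new_cr[i] = (old_cr[i] & andvals[i]) | orvals[i], so a bit is set by
 * putting it into orvals and cleared by masking it out of andvals,
 * leaving all other bits untouched.
 */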

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];
        int i;

        __ctl_store(cregs, 0, 15);
        for (i = 0; i <= 15; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        __ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms;

        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
        parms.orvals[cr] = 1 << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms;

        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
        parms.andvals[cr] = ~(1L << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

/*
 * In early ipl state a temporary logical cpu number is needed, so the
 * sigp functions can be used to sense other cpus. Since NR_CPUS is >= 2
 * with CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO     1

#ifdef CONFIG_ZFCPDUMP

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have
 * to save its prefix registers, since they are lost when switching from
 * 31 bit to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
        __attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return;
        if (cpu >= NR_CPUS) {
                pr_warning("CPU %i exceeds the maximum %i and is excluded from "
                           "the dump\n", cpu, NR_CPUS - 1);
                return;
        }
        zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
        if (!zfcpdump_save_areas[cpu])
                return;
        __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
        while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
               sigp_busy)
                cpu_relax();
        memcpy(zfcpdump_save_areas[cpu],
               (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
               SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
        /* copy original prefix register */
        zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */

static int cpu_known(int cpu_id)
{
        int cpu;

        for_each_present_cpu(cpu) {
                if (__cpu_logical_map[cpu] == cpu_id)
                        return 1;
        }
        return 0;
}

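/*
 * Probe for cpus by brute-force sigp sensing: walk all possible cpu
 * addresses, temporarily map each unknown address to the next free
 * logical cpu and mark it present if it responds as stopped.
 */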
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
        int cpu_id, logical_cpu;

        logical_cpu = cpumask_first(&avail);
        if (logical_cpu >= nr_cpu_ids)
                return 0;
        for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
                if (cpu_known(cpu_id))
                        continue;
                __cpu_logical_map[logical_cpu] = cpu_id;
                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                if (!cpu_stopped(logical_cpu))
                        continue;
                cpu_set(logical_cpu, cpu_present_map);
                smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
                logical_cpu = cpumask_next(logical_cpu, &avail);
                if (logical_cpu >= nr_cpu_ids)
                        break;
        }
        return 0;
}

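/*
 * Preferred detection path: ask the SCLP for the cpu list. The returned
 * info contains each cpu's address and type and says how many of the
 * cpus are configured; the remaining ones are put into standby state.
 */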
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
        struct sclp_cpu_info *info;
        int cpu_id, logical_cpu, cpu;
        int rc;

        logical_cpu = cpumask_first(&avail);
        if (logical_cpu >= nr_cpu_ids)
                return 0;
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        rc = sclp_get_cpu_info(info);
        if (rc)
                goto out;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
                        continue;
                cpu_id = info->cpu[cpu].address;
                if (cpu_known(cpu_id))
                        continue;
                __cpu_logical_map[logical_cpu] = cpu_id;
                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
                cpu_set(logical_cpu, cpu_present_map);
                if (cpu >= info->configured)
                        smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
                else
                        smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
                logical_cpu = cpumask_next(logical_cpu, &avail);
                if (logical_cpu >= nr_cpu_ids)
                        break;
        }
out:
        kfree(info);
        return rc;
}

static int __smp_rescan_cpus(void)
{
        cpumask_t avail;

        cpus_xor(avail, cpu_possible_map, cpu_present_map);
        if (smp_use_sigp_detection)
                return smp_rescan_cpus_sigp(avail);
        else
                return smp_rescan_cpus_sclp(avail);
}

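/*
 * Boot-time cpu detection: try SCLP first; if that fails, fall back to
 * sigp sensing of all possible cpu addresses. For zfcpdump the register
 * save area of each stopped cpu is collected along the way.
 */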
static void __init smp_detect_cpus(void)
{
        unsigned int cpu, c_cpus, s_cpus;
        struct sclp_cpu_info *info;
        u16 boot_cpu_addr, cpu_addr;

        c_cpus = 1;
        s_cpus = 0;
        boot_cpu_addr = __cpu_logical_map[0];
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                panic("smp_detect_cpus failed to allocate memory\n");
        /* Use sigp detection algorithm if sclp doesn't work. */
        if (sclp_get_cpu_info(info)) {
                smp_use_sigp_detection = 1;
                for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
                        if (cpu == boot_cpu_addr)
                                continue;
                        __cpu_logical_map[CPU_INIT_NO] = cpu;
                        if (!cpu_stopped(CPU_INIT_NO))
                                continue;
                        smp_get_save_area(c_cpus, cpu);
                        c_cpus++;
                }
                goto out;
        }

        if (info->has_cpu_type) {
                for (cpu = 0; cpu < info->combined; cpu++) {
                        if (info->cpu[cpu].address == boot_cpu_addr) {
                                smp_cpu_type = info->cpu[cpu].type;
                                break;
                        }
                }
        }

        for (cpu = 0; cpu < info->combined; cpu++) {
                if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
                        continue;
                cpu_addr = info->cpu[cpu].address;
                if (cpu_addr == boot_cpu_addr)
                        continue;
                __cpu_logical_map[CPU_INIT_NO] = cpu_addr;
                if (!cpu_stopped(CPU_INIT_NO)) {
                        s_cpus++;
                        continue;
                }
                smp_get_save_area(c_cpus, cpu_addr);
                c_cpus++;
        }
out:
        kfree(info);
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
        get_online_cpus();
        __smp_rescan_cpus();
        put_online_cpus();
}

/*
 *      Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        preempt_disable();
        /* Enable TOD clock interrupts on the secondary cpu. */
        init_cpu_timer();
        /* Enable cpu timer interrupts on the secondary cpu. */
        init_cpu_vtimer();
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();

        /* call cpu notifiers */
        notify_cpu_starting(smp_processor_id());
        /* Mark this cpu as online */
        ipi_call_lock();
        cpu_set(smp_processor_id(), cpu_online_map);
        ipi_call_unlock();
        /* Switch on interrupts */
        local_irq_enable();
        /* Print info about this processor */
        print_cpu_info();
        /* cpu_idle will call schedule for us */
        cpu_idle();
        return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /*
         *  don't care about the psw and regs settings since we'll never
         *  reschedule the forked task.
         */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        current_set[cpu] = p;
}

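/*
 * Allocate the per-cpu lowcore of a secondary cpu: a block of
 * 2^LC_ORDER pages taken from the DMA zone, which on s390 keeps it
 * below 2 GB as needed for an address loaded into the prefix register.
 * The first 512 bytes are cloned from the current lowcore, the rest is
 * zeroed and filled in below and in __cpu_up().
 */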
static int __cpuinit smp_alloc_lowcore(int cpu)
{
        unsigned long async_stack, panic_stack;
        struct _lowcore *lowcore;

        lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        if (!lowcore)
                return -ENOMEM;
        async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
        panic_stack = __get_free_page(GFP_KERNEL);
        if (!panic_stack || !async_stack)
                goto out;
        memcpy(lowcore, &S390_lowcore, 512);
        memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
        lowcore->async_stack = async_stack + ASYNC_SIZE;
        lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                unsigned long save_area;

                save_area = get_zeroed_page(GFP_KERNEL);
                if (!save_area)
                        goto out;
                lowcore->extended_save_area_addr = (u32) save_area;
        }
#else
        if (vdso_alloc_per_cpu(cpu, lowcore))
                goto out;
#endif
        lowcore_ptr[cpu] = lowcore;
        return 0;

out:
        free_page(panic_stack);
        free_pages(async_stack, ASYNC_ORDER);
        free_pages((unsigned long) lowcore, LC_ORDER);
        return -ENOMEM;
}

static void smp_free_lowcore(int cpu)
{
        struct _lowcore *lowcore;

        lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                free_page((unsigned long) lowcore->extended_save_area_addr);
#else
        vdso_free_per_cpu(cpu, lowcore);
#endif
        free_page(lowcore->panic_stack - PAGE_SIZE);
        free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
        free_pages((unsigned long) lowcore, LC_ORDER);
        lowcore_ptr[cpu] = NULL;
}

/* Upping and downing of CPUs */
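/*
 * Bring-up sequence for a secondary cpu: reset it with sigp
 * initial_cpu_reset, point its prefix register at the freshly
 * allocated lowcore, fill in the kernel stack, idle task and control
 * registers, and finally kick it with sigp restart. The restart
 * interrupt handler then picks up the stack pointer stored in
 * save_area[15] and enters start_secondary(), which sets the cpu
 * online.
 */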
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        struct _lowcore *cpu_lowcore;
        struct stack_frame *sf;
        sigp_ccode ccode;
        u32 lowcore;

        if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
                return -EIO;
        if (smp_alloc_lowcore(cpu))
                return -ENOMEM;
        do {
                ccode = signal_processor(cpu, sigp_initial_cpu_reset);
                if (ccode == sigp_busy)
                        udelay(10);
                if (ccode == sigp_not_operational)
                        goto err_out;
        } while (ccode == sigp_busy);

        lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
        while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
                udelay(10);

        idle = current_set[cpu];
        cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->kernel_stack = (unsigned long)
                task_stack_page(idle) + THREAD_SIZE;
        cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
        sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                     - sizeof(struct pt_regs)
                                     - sizeof(struct stack_frame));
        memset(sf, 0, sizeof(struct stack_frame));
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
        asm volatile(
                "       stam    0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
        cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
        cpu_lowcore->current_task = (unsigned long) idle;
        cpu_lowcore->cpu_nr = cpu;
        cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
        cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
        cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
        eieio();

        while (signal_processor(cpu, sigp_restart) == sigp_busy)
                udelay(10);

        while (!cpu_online(cpu))
                cpu_relax();
        return 0;

err_out:
        smp_free_lowcore(cpu);
        return -EIO;
}

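/*
 * "possible_cpus=<n>" on the kernel command line limits the number of
 * cpus the kernel considers possible: cpu 0 is always possible, the
 * remaining n-1 cpus are marked possible up to nr_cpu_ids.
 */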
static int __init setup_possible_cpus(char *s)
{
        int pcpus, cpu;

        pcpus = simple_strtoul(s, NULL, 0);
        init_cpu_possible(cpumask_of(0));
        for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
        return 0;
}
early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
        struct ec_creg_mask_parms cr_parms;
        int cpu = smp_processor_id();

        cpu_clear(cpu, cpu_online_map);

        /* Disable pfault pseudo page faults on this cpu. */
        pfault_fini();

        memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
        memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

        /* disable all external interrupts */
        cr_parms.orvals[0] = 0;
        cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
                                1 << 11 | 1 << 10 | 1 <<  6 | 1 <<  4);
        /* disable all I/O interrupts */
        cr_parms.orvals[6] = 0;
        cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
                                1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
        /* disable most machine checks */
        cr_parms.orvals[14] = 0;
        cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
                                 1 << 25 | 1 << 24);

        smp_ctl_bit_callback(&cr_parms);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        /* Wait until target cpu is down */
        while (!cpu_stopped(cpu))
                cpu_relax();
        smp_free_lowcore(cpu);
        pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
        idle_task_exit();
        signal_processor(smp_processor_id(), sigp_stop);
        BUG();
        for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

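/*
 * The boot cpu still runs on the lowcore set up by the early startup
 * code. Reallocate it here with the same LC_ORDER block used for
 * secondary cpus, preserving its contents, so that all cpus use an
 * identically sized and allocated lowcore.
 */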
void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
        unsigned long save_area = 0;
#endif
        unsigned long async_stack, panic_stack;
        struct _lowcore *lowcore;
        unsigned int cpu;

        smp_detect_cpus();

        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        print_cpu_info();

        /* Reallocate current lowcore, but keep its contents. */
        lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        panic_stack = __get_free_page(GFP_KERNEL);
        async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
        BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                save_area = get_zeroed_page(GFP_KERNEL);
#endif
        local_irq_disable();
        local_mcck_disable();
        lowcore_ptr[smp_processor_id()] = lowcore;
        *lowcore = S390_lowcore;
        lowcore->panic_stack = panic_stack + PAGE_SIZE;
        lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                lowcore->extended_save_area_addr = (u32) save_area;
#endif
        set_prefix((u32)(unsigned long) lowcore);
        local_mcck_enable();
        local_irq_enable();
#ifdef CONFIG_64BIT
        if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
                BUG();
#endif
        for_each_possible_cpu(cpu)
                if (cpu != smp_processor_id())
                        smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);

        current_thread_info()->cpu = 0;
        cpu_set(0, cpu_present_map);
        cpu_set(0, cpu_online_map);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        current_set[0] = current;
        smp_cpu_state[0] = CPU_STATE_CONFIGURED;
        smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
                                  struct sysdev_attribute *attr,
                                  const char *buf, size_t count)
{
        int cpu = dev->id;
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;

        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        if (cpu_online(cpu))
                goto out;
        rc = 0;
        switch (val) {
        case 0:
                if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
                        rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
                        if (!rc) {
                                smp_cpu_state[cpu] = CPU_STATE_STANDBY;
                                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
                        }
                }
                break;
        case 1:
                if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
                        rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
                        if (!rc) {
                                smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
                                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
                        }
                }
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t cpu_polarization_show(struct sys_device *dev,
                                     struct sysdev_attribute *attr, char *buf)
{
        int cpu = dev->id;
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        switch (smp_cpu_polarization[cpu]) {
        case POLARIZATION_HRZ:
                count = sprintf(buf, "horizontal\n");
                break;
        case POLARIZATION_VL:
                count = sprintf(buf, "vertical:low\n");
                break;
        case POLARIZATION_VM:
                count = sprintf(buf, "vertical:medium\n");
                break;
        case POLARIZATION_VH:
                count = sprintf(buf, "vertical:high\n");
                break;
        default:
                count = sprintf(buf, "unknown\n");
                break;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
        &attr_configure.attr,
#endif
        &attr_address.attr,
        &attr_polarization.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        unsigned int capability;
        int rc;

        rc = get_cpu_capability(&capability);
        if (rc)
                return rc;
        return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

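/*
 * The idle statistics below are read under a hand-rolled sequence
 * count (same idea as a seqlock): idle->sequence is odd while an
 * update is in progress, so readers spin until it is even and retry
 * if it changed across the reads. idle_time is accumulated in TOD
 * clock units; shifting right by 12 converts it to microseconds,
 * matching the idle_time_us attribute name.
 */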
static ssize_t show_idle_count(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        struct s390_idle_data *idle;
        unsigned long long idle_count;
        unsigned int sequence;

        idle = &per_cpu(s390_idle, dev->id);
repeat:
        sequence = idle->sequence;
        smp_rmb();
        if (sequence & 1)
                goto repeat;
        idle_count = idle->idle_count;
        if (idle->idle_enter)
                idle_count++;
        smp_rmb();
        if (idle->sequence != sequence)
                goto repeat;
        return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        struct s390_idle_data *idle;
        unsigned long long now, idle_time, idle_enter;
        unsigned int sequence;

        idle = &per_cpu(s390_idle, dev->id);
        now = get_clock();
repeat:
        sequence = idle->sequence;
        smp_rmb();
        if (sequence & 1)
                goto repeat;
        idle_time = idle->idle_time;
        idle_enter = idle->idle_enter;
        if (idle_enter != 0ULL && idle_enter < now)
                idle_time += now - idle_enter;
        smp_rmb();
        if (idle->sequence != sequence)
                goto repeat;
        return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
        &attr_capability.attr,
        &attr_idle_count.attr,
        &attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
        struct s390_idle_data *idle;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                idle = &per_cpu(s390_idle, cpu);
                memset(idle, 0, sizeof(struct s390_idle_data));
                if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
                        return NOTIFY_BAD;
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
        .notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
        int rc;

        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        if (!cpu_online(cpu))
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
        if (!rc)
                return 0;
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu(c);
#endif
out:
        return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
        cpumask_t newcpus;
        int cpu;
        int rc;

        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        newcpus = cpu_present_map;
        rc = __smp_rescan_cpus();
        if (rc)
                goto out;
        cpus_andnot(newcpus, cpu_present_map, newcpus);
        for_each_cpu_mask(cpu, newcpus) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        cpu_clear(cpu, cpu_present_map);
        }
        rc = 0;
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        if (!cpus_empty(newcpus))
                topology_schedule_update();
        return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
                                  size_t count)
{
        int rc;

        rc = smp_rescan_cpus();
        return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", cpu_management);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
                                 size_t count)
{
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
        rc = topology_set_cpu_management(val);
        if (!rc)
                cpu_management = val;
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
                         dispatching_store);

/*
 * If the resume kernel runs on another cpu than the suspended kernel,
 * we have to switch the cpu IDs in the logical map.
 */
void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id,
                                   struct _lowcore *suspend_lowcore)
{
        int cpu, suspend_cpu_id, resume_cpu_id;
        u32 suspend_phys_cpu_id;

        suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr];
        suspend_cpu_id = suspend_lowcore->cpu_nr;

        for_each_present_cpu(cpu) {
                if (__cpu_logical_map[cpu] == resume_phys_cpu_id) {
                        resume_cpu_id = cpu;
                        goto found;
                }
        }
        panic("Could not find resume cpu in logical map.\n");

found:
        printk("Resume  cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id);
        printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id);

        __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id;
        __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id;

        lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id;
}

u32 smp_get_phys_cpu_id(void)
{
        return __cpu_logical_map[smp_processor_id()];
}

static int __init topology_init(void)
{
        int cpu;
        int rc;

        register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
        if (rc)
                return rc;
#endif
        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
        if (rc)
                return rc;
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        return rc;
        }
        return 0;
}
subsys_initcall(topology_init);