#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

/* Upper bound on the size of any single patch (alt_instr::instrlen is a u8). */
#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

/* Wrapped in do/while so a DPRINTK() call is safe inside un-braced if/else. */
#define DPRINTK(fmt, args...)                                   \
do {                                                            \
        if (debug_alternative)                                  \
                printk(KERN_DEBUG fmt, args);                   \
} while (0)

#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
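
/*
 * Note on this table and the ones below: the asm blob lays the 1..8 byte
 * nops out back to back, so the n-byte sequence starts after the
 * 1 + 2 + ... + (n-1) bytes that precede it; e.g. intel_nops[3] is
 * intelnops + 1 + 2.  Indexing by n therefore yields a pointer to an
 * n-byte nop, with entry 0 unused.
 */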

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
        P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
        P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
        /* On 64-bit, prefer the P6-style long nops (0F 1F) when the CPU is
           Intel and advertises NOPL; otherwise use the K8 nops. */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_has(X86_FEATURE_NOPL))
                return p6_nops;
        else
                return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
        if (boot_cpu_has(X86_FEATURE_K8))
                return k8_nops;
        else if (boot_cpu_has(X86_FEATURE_K7))
                return k7_nops;
        else if (boot_cpu_has(X86_FEATURE_NOPL))
                return p6_nops;
        else
                return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        const unsigned char *const *noptable = find_nop_table();

        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
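
/*
 * Worked example: asking add_nops() to pad 11 bytes emits one
 * ASM_NOP_MAX-byte (8-byte) nop followed by a 3-byte nop, both taken
 * from the per-CPU table selected above.
 */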

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
{
        struct alt_instr *a;
        u8 insnbuf[MAX_PATCH_LEN];

        DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
        for (a = start; a < end; a++) {
                u8 *instr = a->instr;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
                if (!boot_cpu_has(a->cpuid))
                        continue;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
                        instr = __va(instr - (u8 *)VSYSCALL_START +
                                     (u8 *)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __func__, a->instr, instr);
                }
#endif
                memcpy(insnbuf, a->replacement, a->replacementlen);
                /* A relative CALL (0xe8) was assembled at the replacement's
                   address; re-bias its rel32 for the target location. */
                if (*insnbuf == 0xe8 && a->replacementlen == 5)
                        *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
                add_nops(insnbuf + a->replacementlen,
                         a->instrlen - a->replacementlen);
                text_poke_early(instr, insnbuf, a->instrlen);
        }
}
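
/*
 * For reference, the entries walked above come from the alternative*()
 * macros in <asm/alternative.h>.  An illustrative use (exact call sites
 * vary by kernel version) swaps a legacy barrier for MFENCE when SSE2
 * is available:
 *
 *      alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * The macro emits the original instruction inline and records the
 * replacement plus an alt_instr entry in separate sections for the loop
 * above to consume.
 */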

#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                /* patch only prefixes inside the text range we were handed */
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                text_poke(*ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        if (noreplace_smp)
                return;

        mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                text_poke(*ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
}
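
/*
 * A sketch of where the __smp_locks entries come from: the LOCK_PREFIX
 * macro in <asm/alternative.h> (shown schematically; details differ by
 * version) emits the prefix byte and records its address:
 *
 *      .section .smp_locks,"a"
 *      _ASM_ALIGN
 *      _ASM_PTR 661f
 *      .previous
 *      661: lock; ...
 *
 * so each entry names exactly one lock prefix byte, which the two
 * helpers above flip between 0xf0 (lock) and 0x3e (DS override, a no-op
 * for these instructions).
 */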

struct smp_alt_module {
        /* the module that owns these lock prefixes (NULL for core kernel) */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        u8              **locks;
        u8              **locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;        /* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text, void *text_end)
{
        struct smp_alt_module *smp;

        if (noreplace_smp)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __func__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        mutex_lock(&smp_alt);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        mutex_unlock(&smp_alt);
}
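
/*
 * For context (call sites per this era's tree, so treat as indicative):
 * the module loader invokes alternatives_smp_module_add() from the arch
 * module_finalize() path and alternatives_smp_module_del() from
 * module_arch_cleanup(), so module text participates in lock patching;
 * see arch/x86/kernel/module.c.
 */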

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        if (smp_alt_once || noreplace_smp)
                return;

        mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                mutex_unlock(&smp_alt);
                DPRINTK("%s: %s\n", __func__, item->name);
                kfree(item);
                return;
        }
        mutex_unlock(&smp_alt);
}

void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
        /*
         * Older binutils section handling bug prevented
         * alternatives-replacement from working reliably.
         *
         * If this still occurs then you should see a hang
         * or crash shortly after this line:
         */
        printk("lockdep: fixing up alternatives.\n");
#endif

        if (noreplace_smp || smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        mutex_lock(&smp_alt);

        /*
         * Avoid unnecessary switches because it forces JIT based VMs to
         * throw away all cached translations, which can be quite costly.
         */
        if (smp == smp_mode) {
                /* nothing */
        } else if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        smp_mode = smp;
        mutex_unlock(&smp_alt);
}

#endif /* CONFIG_SMP */
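
/*
 * For context: in kernels of this vintage the SMP boot path calls
 * alternatives_smp_switch(1) when bringing up a second CPU, and the CPU
 * hotplug path calls alternatives_smp_switch(0) once only one CPU
 * remains online (see arch/x86/kernel/smpboot.c); the exact call sites
 * are version-dependent.
 */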

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                /* the patch callback returns how many bytes it wrote */
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
        /* The patching is not fully atomic, so try to avoid local
           interruptions that might execute the code being patched.
           Other CPUs are not running. */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during code
         * patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boottime-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                        set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);

                /* Only switch to UP mode if we don't immediately boot others */
                if (num_present_cpus() == 1 || setup_max_cpus <= 1)
                        alternatives_smp_switch(0);
        }
#endif

        apply_paravirt(__parainstructions, __parainstructions_end);

        if (smp_alt_once)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);

        restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                              size_t len)
{
        unsigned long flags;

        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
        local_irq_save(flags);
        /* map the target page(s) at a fixmap slot we can safely write to */
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}
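
/*
 * Usage sketch (hypothetical caller): kprobes arms a probe by poking a
 * single breakpoint byte over the first opcode byte, e.g.
 *
 *      unsigned char int3 = 0xcc;
 *
 *      mutex_lock(&text_mutex);
 *      text_poke(p->addr, &int3, 1);
 *      mutex_unlock(&text_mutex);
 *
 * A one-byte store is naturally atomic, which is why this pattern is
 * safe on a running kernel.
 */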