/* arch/x86/kernel/alternative.c */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>

#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)				\
	do {						\
		if (debug_alternative)			\
			printk(KERN_DEBUG fmt, args);	\
	} while (0)

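/*
 * Illustrative note (not part of the original file): the __setup() handlers
 * above are driven from the kernel command line, e.g. booting with
 *
 *	debug-alternative smp-alt-boot
 *
 * turns on the DPRINTK output and restricts SMP-lock patching to boot time.
 */
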
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_has(X86_FEATURE_K8))
		return k8_nops;
	else if (boot_cpu_has(X86_FEATURE_K7))
		return k7_nops;
	else if (boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

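/*
 * Illustrative sketch (not part of the original file): padding the tail of
 * a patch buffer, the way apply_alternatives() below uses add_nops(). The
 * buffer name and sizes here are made up:
 *
 *	u8 buf[7];
 *	memcpy(buf, repl, 2);		// 2 bytes of replacement code
 *	add_nops(buf + 2, 5);		// rest becomes one 5-byte nop
 *	text_poke_early(addr, buf, 7);
 */
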
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		/* 0xe8 is a relative call; adjust the displacement for the
		   instruction's new home so the target stays the same */
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}

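/*
 * Illustrative sketch (not part of the original file): alt_instr entries
 * are normally emitted by the alternative() macro from <asm/alternative.h>.
 * For example, the 32-bit mb() barrier is defined roughly as:
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * apply_alternatives() then replaces the lock-prefixed add with mfence on
 * CPUs that advertise SSE2.
 */
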
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

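/*
 * Illustrative sketch (not part of the original file): the s32 offsets
 * walked above are emitted by the LOCK_PREFIX macro in <asm/alternative.h>,
 * roughly:
 *
 *	".section .smp_locks,\"a\"\n"
 *	".balign 4\n"
 *	".long 671f - .\n"	// PC-relative offset back to the prefix
 *	".previous\n"
 *	"671:\n\tlock; "
 *
 * so "ptr = (u8 *)poff + *poff" recovers the address of the lock byte.
 */
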
struct smp_alt_module {
	/* the module owning this text range (NULL for the core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * A section-handling bug in older binutils prevented
	 * alternatives replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
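
/*
 * Illustrative sketch (not part of the original file): kprobes-style code
 * can use this to refuse probes on bytes that SMP alternatives may rewrite
 * later, e.g.:
 *
 *	if (alternatives_text_reserved(p->addr, p->addr))
 *		return -EINVAL;	// probe would overlap a lock-prefix site
 */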
#endif

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

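/*
 * Illustrative sketch (not part of the original file, abridged): each
 * paravirt_patch_site is placed alongside its instruction by the
 * _paravirt_alt() wrapper in the paravirt headers, roughly:
 *
 *	"771:\n\t" insn_string "\n772:\n"
 *	".pushsection .parainstructions,\"a\"\n"
 *	_ASM_PTR " 771b\n"	// ->instr
 *	"  .byte " type "\n"	// ->instrtype
 *	"  .byte 772b-771b\n"	// ->len
 *	".popsection\n"
 */
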
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boot-time-only mode and free the
	 * tables in case we know the number of CPUs will never
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
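
/*
 * Illustrative sketch (not part of the original file): a one-byte poke
 * under text_mutex, as alternatives_smp_lock() does above:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);	// write LOCK prefix
 *	mutex_unlock(&text_mutex);
 */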

/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from the immediate values patching code.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	void *addr;
	const void *opcode;
	size_t len;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;

	if (atomic_dec_and_test(&stop_machine_first)) {
		text_poke(tpp->addr, tpp->opcode, tpp->len);
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	flush_icache_range((unsigned long)tpp->addr,
			   (unsigned long)tpp->addr + tpp->len);
	return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify a multi-byte instruction by using stop_machine() on SMP. This
 * allows the user to poke/set multi-byte text on SMP. Only code that is
 * never executed from NMI or MCE context may be modified this way, since
 * stop_machine() does _not_ protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;

	tpp.addr = addr;
	tpp.opcode = opcode;
	tpp.len = len;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}

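/*
 * Illustrative sketch (not part of the original file): the optimized-kprobes
 * path replaces a 5-byte instruction with a jump roughly like:
 *
 *	get_online_cpus();
 *	mutex_lock(&text_mutex);
 *	text_poke_smp(addr, jmp_code, 5);
 *	mutex_unlock(&text_mutex);
 *	put_online_cpus();
 *
 * Every online CPU spins in stop_machine_text_poke() while one of them
 * performs the write.
 */
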
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)

unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];

void __init arch_init_ideal_nop5(void)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then fall
	 * back to a less efficient 5-byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"	/* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
		break;
	case 1:
		pr_info("converting mcount calls to 66 66 66 66 90\n");
		memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
		break;
	case 2:
		pr_info("converting mcount calls to jmp . + 5\n");
		memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
		break;
	}
}
#endif