#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int no_replacement    = 0;
static int smp_alt_once      = 0;
static int debug_alternative = 0;
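
/*
 * Command line switches: "noreplacement" disables all patching,
 * "smp-alt-boot" makes SMP alternatives a boot-time-only decision,
 * and "debug-alternative" enables the DPRINTK tracing below.
 */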
static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
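
/*
 * NOP tables: noptable[len] points at a vendor-preferred nop sequence
 * that is exactly len bytes long, so the padding loops below can pick
 * the largest nop that still fits (up to ASM_NOP_MAX bytes).
 */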
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   easily get them into C strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
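
/* Patch tables and section boundaries, provided by the linker script. */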
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	unsigned char **noptable = find_nop_table();
	struct alt_instr *a;
	u8 *instr;
	int diff, i, k;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. Resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		/* Pad the rest with nops */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			k = diff;
			if (k > ASM_NOP_MAX)
				k = ASM_NOP_MAX;
			memcpy(a->instr + i, noptable[k], k);
		}
	}
}
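
/*
 * "lock" prefixes are pure overhead on a single-CPU machine. The
 * helpers below rewrite them in place: 0xf0 when running SMP, a
 * one-byte nop when running UP.
 */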
#ifdef CONFIG_SMP

static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		/* stash the original (SMP) instruction after the replacement */
		memcpy(a->replacement + a->replacementlen,
		       a->instr,
		       a->instrlen);
	}
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++) {
		/* restore the saved SMP instruction */
		memcpy(a->instr,
		       a->replacement + a->replacementlen,
		       a->instrlen);
	}
}

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = noptable[1][0]; /* single-byte nop */
	}
}
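
/*
 * One record per registered .text region (the core kernel and each
 * loaded module), so the lock prefixes can be patched again later.
 */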
struct smp_alt_module {
	/* the module owning this region (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
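
/* Register a code region for runtime SMP/UP lock-prefix switching. */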
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
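
/* Forget a module's lock prefixes when it is unloaded. */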
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
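
/*
 * Repatch at runtime: switch to the SMP variants when a second CPU
 * comes up, back to the UP variants when only one CPU remains online.
 */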
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif
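
/*
 * Boot-time entry point: apply the CPU-feature alternatives once, then
 * decide whether the SMP alternative tables can be freed or must be
 * kept around for later runtime switching.
 */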
void __init alternative_instructions(void)
{
	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
}