2 #include <linux/linkage.h>
3 #include <asm/segment.h>
4 #include <asm/pgtable.h>
8 # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
10 # wakeup_code runs in real mode, and at unknown address (determined at run-time).
11 # Therefore it must only use relative jumps/calls.
13 # Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled
15 # If physical address of wakeup_code is 0x12345, BIOS should call us with
16 # cs = 0x1234, eip = 0x05
# NOTE(review): this is a non-contiguous excerpt; the number at the start of
# each line is the original file line, and gaps between numbers mean elided
# instructions/labels.
#
# Real-mode entry point. The BIOS jumps here after S3 resume, into a *copy*
# of this code placed somewhere in low memory at a run-time-determined
# address, so everything is addressed relative to wakeup_code and only
# relative/indirect jumps are used.
27 # Running in *copy* of this code, somewhere in low 1MB.
# Debug: POST code 0xa1 on I/O port 0x80 marks progress for hardware debug.
29 movb $0xa1, %al ; outb %al, $0x80
34 movw %ax, %ds # Make ds:0 point to wakeup_start
36 # Private stack is needed for ASUS board
37 mov $(wakeup_stack - wakeup_code), %sp
39 pushl $0 # Kill any dangerous flags
# Integrity check: acpi_copy_wakeup_routine stored 0x12345678 in real_magic
# before suspend; a mismatch means the low-memory image was corrupted.
42 movl real_magic - wakeup_code, %eax
43 cmpl $0x12345678, %eax
46 call verify_cpu # Verify the cpu supports long
# Optional video re-initialization, controlled by bits in video_flags
# (copied from acpi_video_flags by the copy routine below).
51 testl $1, video_flags - wakeup_code
55 movw %ax, %ds # Bios might have played with that
59 testl $2, video_flags - wakeup_code
61 mov video_mode - wakeup_code, %ax
# Progress character written directly into VGA text memory via %fs.
67 movw $0x0e00 + 'L', %fs:(0x10)
69 movb $0xa2, %al ; outb %al, $0x80
# Compute the 32-bit linear base of this copy from %ds (the <<4 shift is
# presumably on an elided line — TODO confirm), then relocate the far-jump
# vectors and the GDT base pointer by that amount so they hold linear
# addresses valid after the mode switch.
71 mov %ds, %ax # Find 32bit wakeup_code addr
72 movzx %ax, %esi # (Convert %ds:gdt to a linear ptr)
75 addl %esi, wakeup_32_vector - wakeup_code
76 addl %esi, wakeup_long64_vector - wakeup_code
77 addl %esi, gdt_48a + 2 - wakeup_code # Fixup the gdt pointer
79 lidtl %ds:idt_48a - wakeup_code
80 lgdtl %ds:gdt_48a - wakeup_code # load gdt with whatever is
# Enter protected mode: lmsw sets the PE bit in CR0.
83 movl $1, %eax # protected mode (PE) bit
84 lmsw %ax # This is it!
# Far jump through the relocated vector: flushes the prefetch queue and
# loads __KERNEL32_CS, landing in wakeup_32.
88 ljmpl *(wakeup_32_vector - wakeup_code)
# Far-pointer operand for the ljmpl above: 32-bit offset (relocated at run
# time by the addl fixup) followed by the __KERNEL32_CS selector.
92 .long wakeup_32 - wakeup_code
93 .word __KERNEL32_CS, 0
# wakeup_32: 32-bit protected-mode continuation. Still executing from the
# low-memory copy; paging is off until the CR0 write below.
97 # Running in this code, but at low address; paging is not yet turned on.
98 movb $0xa5, %al ; outb %al, $0x80
100 movl $__KERNEL_DS, %eax
# Progress character into VGA text memory (0xb8000 region).
103 movw $0x0e00 + 'i', %ds:(0xb8012)
104 movb $0xa8, %al ; outb %al, $0x80;
107 * Prepare for entering 64bit mode
# Compute the linear address of the trampoline PML4; presumably loaded into
# %cr3 on an elided line — TODO confirm.
115 /* Setup early boot stage 4 level pagetables */
116 leal (wakeup_level4_pgt - wakeup_code)(%esi), %eax
# CPUID extended leaf 0x80000001 reports the NX capability bit.
119 /* Check if nx is implemented */
120 movl $0x80000001, %eax
124 /* Enable Long Mode */
126 btsl $_EFER_LME, %eax
128 /* No Execute supported? */
133 /* Make changes effective */
134 1: movl $MSR_EFER, %ecx
# CR0: set PG (bit 31) and PE (bit 0). With EFER.LME already set, enabling
# paging activates long mode (compatibility submode until the far jump).
139 btsl $31, %eax /* Enable paging and in turn activate Long Mode */
140 btsl $0, %eax /* Enable protected mode */
142 /* Make changes effective */
# Architectural requirements at this point (see comment block):
148 CR3 must point to PML4
149 Next instruction must be a branch
150 This must be on identity-mapped page
153 * At this point we're in long mode but in 32bit compatibility mode
154 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
155 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
156 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
159 /* Finally jump in 64bit mode */
160 ljmp *(wakeup_long64_vector - wakeup_code)(%esi)
# Far-pointer operand for the ljmp above; the offset is relocated to a
# linear address at run time by the real-mode fixup code.
163 wakeup_long64_vector:
164 .long wakeup_long64 - wakeup_code
# wakeup_long64: first true 64-bit code after resume (fragmented excerpt).
169 /* Hooray, we are in Long 64-bit mode (but still running in
174 * We must switch to a new descriptor in kernel space for the GDT
175 * because soon the kernel won't have access anymore to the userspace
176 * addresses where we're currently running on. We have to do that here
177 * because in 32bit we couldn't load a 64bit linear address.
# Progress markers: VGA character plus POST code on port 0x80.
181 movw $0x0e00 + 'n', %ds:(0xb8014)
182 movb $0xa9, %al ; outb %al, $0x80
# Verify saved_magic (stamped with 0x123456789abcdef0 by
# acpi_copy_wakeup_routine before suspend) to confirm the kernel image
# survived the suspend; comparison/branch is on elided lines.
184 movq saved_magic, %rax
185 movq $0x123456789abcdef0, %rdx
189 movw $0x0e00 + 'u', %ds:(0xb8016)
# Reload data segment registers with the kernel data selector.
193 movw $__KERNEL_DS, %ax
201 movw $0x0e00 + 'x', %ds:(0xb8018)
207 movw $0x0e00 + '!', %ds:(0xb801a)
# Trampoline GDT and null IDT descriptors (excerpt; labels elided).
215 /* It's good to keep gdt in sync with one in trampoline.S */
216 .word 0, 0, 0, 0 # dummy
217 /* ??? Why I need the accessed bit set in order for this to work? */
218 .quad 0x00cf9b000000ffff # __KERNEL32_CS (32-bit code, D=1)
219 .quad 0x00af9b000000ffff # __KERNEL_CS (64-bit code, L=1)
220 .quad 0x00cf93000000ffff # __KERNEL_DS
# Zero-limit IDT: any interrupt/exception taken before a real IDT is
# loaded presumably triple-faults (reset) — intentional for trampolines.
223 .word 0 # idt limit = 0
224 .word 0, 0 # idt base = 0L
227 .word 0x800 # gdt limit=2048,
229 .long gdta - wakeup_code # gdt base (relocated later on)
# Failure-path debug stubs (their labels — bogus magic / bogus CPU etc. —
# are elided from this excerpt): each emits a distinct POST code on port
# 0x80 so a stuck resume can be diagnosed by hardware debug.
237 movb $0xba,%al ; outb %al,$0x80
242 movb $0xb3,%al ; outb %al,$0x80
247 movb $0xbc,%al ; outb %al,$0x80
250 #include "../verify_cpu.S"
# Video mode numbering scheme: the high byte of a mode ID selects its
# family; the #defines below give each family's base offset.
252 /* This code uses an extended set of video mode numbers. These include:
253 * Aliases for standard modes
257 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
258 * of compatibility when extending the table. These are between 0x00 and 0xff.
260 #define VIDEO_FIRST_MENU 0x0000
262 /* Standard BIOS video modes (BIOS number + 0x0100) */
263 #define VIDEO_FIRST_BIOS 0x0100
265 /* VESA BIOS video modes (VESA number + 0x0200) */
266 #define VIDEO_FIRST_VESA 0x0200
268 /* Video7 special modes (BIOS number + 0x0900) */
269 #define VIDEO_FIRST_V7 0x0900
271 # Setting of user mode (AX=mode ID) => CF=success
# Dispatch on %ah (the mode-family byte): compare against each family base
# from the #defines above; branch targets are elided from this excerpt.
279 testb $VIDEO_RECALC>>8, %ah
282 cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah
285 cmpb $VIDEO_FIRST_SPECIAL>>8, %ah
288 cmpb $VIDEO_FIRST_V7>>8, %ah
292 cmpb $VIDEO_FIRST_VESA>>8, %ah
300 # jz setbios Add bios modes later
# VESA path: strip the 0x0200 family bias, request a linear frame buffer,
# and issue the VESA BIOS set-mode call (the int $0x10 is on an elided
# line — TODO confirm), then check the AX=0x004f success signature.
306 subb $VIDEO_FIRST_VESA>>8, %bh
307 orw $0x4000, %bx # Use linear frame buffer
308 movw $0x4f02, %ax # VESA BIOS mode set call
310 cmpw $0x004f, %ax # AL=4f if implemented
311 jnz _setbada # AH=0 if OK
316 _setbada: jmp setbada
# Private real-mode stack area referenced by the `mov $(wakeup_stack -
# wakeup_code), %sp` in the entry code above.
318 wakeup_stack_begin: # Stack grows down
321 wakeup_stack: # Just below end of page
# Trampoline PML4: one entry mapping via level3_ident_pgt (identity map)
# and entry 511 mapping the kernel range via level3_kernel_pgt; the fill
# directives between the two entries are elided from this excerpt.
324 ENTRY(wakeup_level4_pgt)
325 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
327 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
328 .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
333 # acpi_copy_wakeup_routine
335 # Copy the above routine to low memory.
338 # %rdi: place to copy wakeup routine to
340 # Returned address is location of code in low memory (past data and stack)
343 ENTRY(acpi_copy_wakeup_routine)
# Patch run-time parameters into the low-memory copy at %rdi: the saved
# video mode/flags, and the real_magic cookie that the real-mode entry
# checks against 0x12345678 after resume.
347 movl saved_video_mode, %edx
348 movl %edx, video_mode - wakeup_start (,%rdi)
349 movl acpi_video_flags, %edx
350 movl %edx, video_flags - wakeup_start (,%rdi)
351 movq $0x12345678, real_magic - wakeup_start (,%rdi)
# Stamp saved_magic so wakeup_long64 can verify the kernel image survived
# suspend (it compares against the same 0x123456789abcdef0 constant).
352 movq $0x123456789abcdef0, %rdx
353 movq %rdx, saved_magic
# Read-back self-check of the store just made (compare is on elided lines).
355 movq saved_magic, %rax
356 movq $0x123456789abcdef0, %rdx
360 # restore the regs we used
# S4bios variant entry (body elided from this excerpt).
363 ENTRY(do_suspend_lowlevel_s4bios)
# do_suspend_lowlevel: save full CPU context, then enter the ACPI sleep
# state. Execution resumes (via the wakeup trampoline) at .L97 below.
368 .globl do_suspend_lowlevel
369 .type do_suspend_lowlevel,@function
374 call save_processor_state
# Snapshot every general-purpose register and the flags into the
# saved_context fields so the restore path can rebuild the exact
# pre-suspend register state.
376 movq %rsp, saved_context_esp(%rip)
377 movq %rax, saved_context_eax(%rip)
378 movq %rbx, saved_context_ebx(%rip)
379 movq %rcx, saved_context_ecx(%rip)
380 movq %rdx, saved_context_edx(%rip)
381 movq %rbp, saved_context_ebp(%rip)
382 movq %rsi, saved_context_esi(%rip)
383 movq %rdi, saved_context_edi(%rip)
384 movq %r8, saved_context_r08(%rip)
385 movq %r9, saved_context_r09(%rip)
386 movq %r10, saved_context_r10(%rip)
387 movq %r11, saved_context_r11(%rip)
388 movq %r12, saved_context_r12(%rip)
389 movq %r13, saved_context_r13(%rip)
390 movq %r14, saved_context_r14(%rip)
391 movq %r15, saved_context_r15(%rip)
392 pushfq ; popq saved_context_eflags(%rip)
# Record the resume entry point: after wakeup, control is transferred to
# .L97 (the restore path below).
394 movq $.L97, saved_rip(%rip)
# Tail-call into the ACPI sleep entry; does not return on success.
405 jmp acpi_enter_sleep_state
# Resume path (.L97, label elided): first reload state from raw
# saved_context byte offsets (58/50/42/34 — presumably descriptor/segment
# fields; their consumers are on elided lines — TODO confirm), then restore
# every GPR and the flags saved above, and tail-call
# restore_processor_state.
412 movq saved_context+58(%rip), %rax
414 movq saved_context+50(%rip), %rax
416 movq saved_context+42(%rip), %rax
418 movq saved_context+34(%rip), %rax
# Flags first, then all general-purpose registers (mirrors the save order).
420 pushq saved_context_eflags(%rip) ; popfq
421 movq saved_context_esp(%rip), %rsp
422 movq saved_context_ebp(%rip), %rbp
423 movq saved_context_eax(%rip), %rax
424 movq saved_context_ebx(%rip), %rbx
425 movq saved_context_ecx(%rip), %rcx
426 movq saved_context_edx(%rip), %rdx
427 movq saved_context_esi(%rip), %rsi
428 movq saved_context_edi(%rip), %rdi
429 movq saved_context_r08(%rip), %r8
430 movq saved_context_r09(%rip), %r9
431 movq saved_context_r10(%rip), %r10
432 movq saved_context_r11(%rip), %r11
433 movq saved_context_r12(%rip), %r12
434 movq saved_context_r13(%rip), %r13
435 movq saved_context_r14(%rip), %r14
436 movq saved_context_r15(%rip), %r15
# Tail-call: restore_processor_state returns to our caller via the
# restored stack.
440 jmp restore_processor_state
443 .size do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel
# Static save slots used across suspend/resume. saved_rip holds the resume
# entry point (.L97); saved_magic is the integrity cookie stamped by
# acpi_copy_wakeup_routine and verified in wakeup_long64.
447 ENTRY(saved_rbp) .quad 0
448 ENTRY(saved_rsi) .quad 0
449 ENTRY(saved_rdi) .quad 0
450 ENTRY(saved_rbx) .quad 0
452 ENTRY(saved_rip) .quad 0
453 ENTRY(saved_rsp) .quad 0
455 ENTRY(saved_magic) .quad 0