#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2008, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
#   EnablePaging64.S
#
# Abstract:
#
#   AsmEnablePaging64 function
#
# Notes:
#
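#   Control-flow summary, as read from the code below: starting in 32-bit
#   protected mode, the routine builds a far pointer {Cs, L1} on the stack,
#   sets CR4.PAE and EFER.LME, then sets CR0.PG to activate long mode, and
#   far-returns into the new code segment, where it loads its arguments and
#   calls EntryPoint (Context1, Context2) on NewStack.
#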
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# VOID
# EFIAPI
# InternalX86EnablePaging64 (
#   IN      UINT16                    Cs,
#   IN      UINT64                    EntryPoint,
#   IN      UINT64                    Context1,  OPTIONAL
#   IN      UINT64                    Context2,  OPTIONAL
#   IN      UINT64                    NewStack
#   );
#------------------------------------------------------------------------------
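#------------------------------------------------------------------------------
# Usage sketch (hypothetical caller, shown as C inside this comment; the
# selector value, entry-point name, and stack buffer are illustrative
# assumptions, not part of this module). The public BaseLib wrapper
# AsmEnablePaging64() forwards its arguments to this routine. CR3 must
# already point at valid 4-level page tables, and the GDT must contain a
# 64-bit code segment descriptor (L bit set) for the selector passed as Cs.
#
#   VOID EFIAPI LongModeMain (UINT64 Context1, UINT64 Context2); // no return
#
#   UINT8 mStack[0x1000];      // stack used by LongModeMain after the switch
#
#   AsmEnablePaging64 (
#     0x38,                                      // Cs: 64-bit code selector
#     (UINT64)(UINTN)LongModeMain,               // EntryPoint, run in long mode
#     0,                                         // Context1, passed in rcx
#     0,                                         // Context2, passed in rdx
#     (UINT64)(UINTN)(mStack + sizeof (mStack))  // NewStack, top of mStack
#     );
#------------------------------------------------------------------------------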
.global ASM_PFX(InternalX86EnablePaging64)
ASM_PFX(InternalX86EnablePaging64):
    # Execution begins here in 32-bit protected mode. Although this file is
    # assembled as X64 code, the encodings of the instructions up to the
    # lret below are identical to those of the equivalent IA32 instructions.
    cli                                 # disable interrupts across the switch
    pop     %rax                        # skip the return address
    callq   Base
Base:
    addl    $(L1-Base), (%rsp)          # patch pushed address from Base to L1;
                                        # with Cs (the 1st arg) beneath it, the
                                        # stack now holds the far pointer for lret
    mov     %cr4, %rax
    or      $0x20, %al                  # set PAE (CR4 bit 5)
    mov     %rax, %cr4                  # enable PAE
    mov     $0xc0000080, %ecx           # select the IA32_EFER MSR
    rdmsr
    or      $0x1, %ah                   # set LME (EFER bit 8)
    wrmsr
    mov     %cr0, %rax
    bts     $0x1f, %eax                 # set PG (CR0 bit 31)
    mov     %rax, %cr0                  # enable paging; long mode becomes active
    lret                                # far return to Cs:L1, entering 64-bit mode
L1:                                     # long mode starts here
    # The upper 32 bits of rsp are undefined after the switch, so the addr32
    # (0x67) prefix forces 32-bit addressing through esp for the old stack.
    addr32 mov (%esp), %rbx             # rbx = EntryPoint
    addr32 mov 0x8(%esp), %rcx          # rcx = Context1 (1st argument)
    addr32 mov 0x10(%esp), %rdx         # rdx = Context2 (2nd argument)
    addr32 mov 0x18(%esp), %rsp         # switch to NewStack
    add     $-0x20, %rsp                # reserve shadow space per the MS x64
                                        # (EFIAPI) calling convention
    callq   *%rbx                       # EntryPoint (Context1, Context2)
    jmp     .                           # dead loop if EntryPoint() returned