-rw-r--r--  MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S         36
-rw-r--r--  MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm       38
-rw-r--r--  MdePkg/Library/BaseLib/Arm/CpuPause.asm            41
-rw-r--r--  MdePkg/Library/BaseLib/Arm/DisableInterrupts.S     35
-rw-r--r--  MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm   37
-rw-r--r--  MdePkg/Library/BaseLib/Arm/EnableInterrupts.S      36
-rw-r--r--  MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm    37
-rw-r--r--  MdePkg/Library/BaseLib/Arm/GccInline.c             61
-rw-r--r--  MdePkg/Library/BaseLib/Arm/GetInterruptsState.S    43
-rw-r--r--  MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm  45
-rw-r--r--  MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c   59
-rw-r--r--  MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S       70
-rw-r--r--  MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm     70
-rw-r--r--  MdePkg/Library/BaseLib/Arm/SwitchStack.asm         45
-rw-r--r--  MdePkg/Library/BaseLib/Arm/Unaligned.c             252
-rw-r--r--  MdePkg/Library/BaseLib/BaseLib.inf                 24
-rw-r--r--  MdePkg/Library/BaseLib/Ia32/GccInline.c            1758
-rw-r--r--  MdePkg/Library/BaseLib/X64/GccInline.c             1806
18 files changed, 4491 insertions(+), 2 deletions(-)
diff --git a/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S b/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S
new file mode 100644
index 0000000000..0c3a9d5899
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S
@@ -0,0 +1,36 @@
+#------------------------------------------------------------------------------
+#
+# CpuBreakpoint() for ARM
+#
+# Copyright (c) 2006 - 2009, Intel Corporation<BR>
+# Portions copyright (c) 2008-2009 Apple Inc.<BR>
+# All rights reserved. This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.align 2
+.globl ASM_PFX(CpuBreakpoint)
+
+#/**
+# Generates a breakpoint on the CPU.
+#
+# Generates a breakpoint on the CPU. The breakpoint must be implemented such
+# that code can resume normal execution after the breakpoint.
+#
+#**/
+#VOID
+#EFIAPI
+#CpuBreakpoint (
+# VOID
+# );
+#
+ASM_PFX(CpuBreakpoint):
+ swi 0xdbdbdb
+ bx lr
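
As a usage sketch (not part of the patch itself): CpuBreakpoint() is normally reached from C on an assert or debug path, so the SWI immediate (0xdbdbdb above) has to be one the attached debugger traps and can step past. A minimal, hypothetical caller, with an invented helper name:

  #include <Base.h>
  #include <Library/BaseLib.h>

  //
  // Hypothetical helper: trap into the debugger when an unexpected condition
  // is hit, then continue normally if the debugger resumes execution.
  //
  VOID
  EFIAPI
  TrapIfBroken (
    IN BOOLEAN  Condition
    )
  {
    if (!Condition) {
      CpuBreakpoint ();   // "swi 0xdbdbdb" with the ARM implementation above
    }
  }
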
diff --git a/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm b/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm
new file mode 100644
index 0000000000..83485e9d78
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm
@@ -0,0 +1,38 @@
+;------------------------------------------------------------------------------
+;
+; CpuBreakpoint() for ARM
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT CpuBreakpoint
+
+ AREA Cpu_Breakpoint, CODE, READONLY
+
+;/**
+; Generates a breakpoint on the CPU.
+;
+; Generates a breakpoint on the CPU. The breakpoint must be implemented such
+; that code can resume normal execution after the breakpoint.
+;
+;**/
+;VOID
+;EFIAPI
+;CpuBreakpoint (
+; VOID
+; );
+;
+CpuBreakpoint
+ swi 0xdbdbdb
+ bx lr
+
+ END
diff --git a/MdePkg/Library/BaseLib/Arm/CpuPause.asm b/MdePkg/Library/BaseLib/Arm/CpuPause.asm
new file mode 100644
index 0000000000..f6d19c9d96
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/CpuPause.asm
@@ -0,0 +1,41 @@
+;------------------------------------------------------------------------------
+;
+; CpuPause() for ARM
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT CpuPause
+ AREA cpu_pause, CODE, READONLY
+
+;/**
+; Requests CPU to pause for a short period of time.
+;
+; Requests CPU to pause for a short period of time. Typically used in MP
+; systems to prevent memory starvation while waiting for a spin lock.
+;
+;**/
+;VOID
+;EFIAPI
+;CpuPause (
+; VOID
+; );
+;
+CpuPause
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ BX LR
+
+ END
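
For context (again, not part of the patch): CpuPause() is intended for busy-wait loops, where it gives the pipeline and memory system a short breather between polls. A hedged sketch of that pattern; the flag variable and its producer are hypothetical:

  #include <Base.h>
  #include <Library/BaseLib.h>

  //
  // Hypothetical flag set by another processor or an interrupt handler.
  // volatile keeps the compiler from hoisting the load out of the loop.
  //
  volatile BOOLEAN  mDataReady = FALSE;

  VOID
  EFIAPI
  WaitForData (
    VOID
    )
  {
    while (!mDataReady) {
      CpuPause ();   // a few NOPs on ARM, PAUSE on IA-32/X64
    }
  }
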
diff --git a/MdePkg/Library/BaseLib/Arm/DisableInterrupts.S b/MdePkg/Library/BaseLib/Arm/DisableInterrupts.S
new file mode 100644
index 0000000000..9fbc3f9510
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/DisableInterrupts.S
@@ -0,0 +1,35 @@
+#------------------------------------------------------------------------------
+#
+# DisableInterrupts() for ARM
+#
+# Copyright (c) 2006 - 2009, Intel Corporation<BR>
+# Portions copyright (c) 2008-2009 Apple Inc.<BR>
+# All rights reserved. This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+.globl ASM_PFX(DisableInterrupts)
+
+#/**
+# Disables CPU interrupts.
+#
+#**/
+#VOID
+#EFIAPI
+#DisableInterrupts (
+# VOID
+# );
+#
+ASM_PFX(DisableInterrupts):
+ mrs R0,CPSR
+ orr R0,R0,#0x80 @Disable IRQ interrupts
+ msr CPSR_c,R0
+ bx LR
diff --git a/MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm b/MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm
new file mode 100644
index 0000000000..71adb1c7a5
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm
@@ -0,0 +1,37 @@
+;------------------------------------------------------------------------------
+;
+; DisableInterrupts() for ARM
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT DisableInterrupts
+
+ AREA Interrupt_disable, CODE, READONLY
+
+;/**
+; Disables CPU interrupts.
+;
+;**/
+;VOID
+;EFIAPI
+;DisableInterrupts (
+; VOID
+; );
+;
+DisableInterrupts
+ MRS R0,CPSR
+ ORR R0,R0,#0x80 ;Disable IRQ interrupts
+ MSR CPSR_c,R0
+ BX LR
+
+ END
diff --git a/MdePkg/Library/BaseLib/Arm/EnableInterrupts.S b/MdePkg/Library/BaseLib/Arm/EnableInterrupts.S
new file mode 100644
index 0000000000..b4115e802b
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/EnableInterrupts.S
@@ -0,0 +1,36 @@
+#------------------------------------------------------------------------------
+#
+# EnableInterrupts() for ARM
+#
+# Copyright (c) 2006 - 2009, Intel Corporation<BR>
+# Portions copyright (c) 2008-2009 Apple Inc.<BR>
+# All rights reserved. This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+.globl ASM_PFX(EnableInterrupts)
+
+
+#/**
+# Enables CPU interrupts.
+#
+#**/
+#VOID
+#EFIAPI
+#EnableInterrupts (
+# VOID
+# );
+#
+ASM_PFX(EnableInterrupts):
+ mrs R0,CPSR
+ bic R0,R0,#0x80 @Enable IRQ interrupts
+ msr CPSR_c,R0
+ bx LR
diff --git a/MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm b/MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm
new file mode 100644
index 0000000000..1a98fb57b4
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm
@@ -0,0 +1,37 @@
+;------------------------------------------------------------------------------
+;
+; EnableInterrupts() for ARM
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT EnableInterrupts
+
+ AREA Interrupt_enable, CODE, READONLY
+
+;/**
+; Enables CPU interrupts.
+;
+;**/
+;VOID
+;EFIAPI
+;EnableInterrupts (
+; VOID
+; );
+;
+EnableInterrupts
+ MRS R0,CPSR
+ BIC R0,R0,#0x80 ;Enable IRQ interrupts
+ MSR CPSR_c,R0
+ BX LR
+
+ END
diff --git a/MdePkg/Library/BaseLib/Arm/GccInline.c b/MdePkg/Library/BaseLib/Arm/GccInline.c
new file mode 100644
index 0000000000..cb1dc8bebe
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/GccInline.c
@@ -0,0 +1,61 @@
+/** @file
+ GCC inline implementation of BaseLib processor specific functions.
+
+ Copyright (c) 2006 - 2007, Intel Corporation<BR>
+ Portions copyright (c) 2008-2009 Apple Inc.<BR>
+ All rights reserved. This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+ Requests CPU to pause for a short period of time.
+
+ Requests CPU to pause for a short period of time. Typically used in MP
+ systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuPause (
+ VOID
+ )
+{
+ __asm__ __volatile__ (
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ );
+}
+
+VOID
+EFIAPI
+InternalSwitchStackAsm (
+ SWITCH_STACK_ENTRY_POINT EntryPoint,
+ VOID *Context,
+ VOID *Context2,
+ VOID *NewStack
+ )
+{
+ __asm__ __volatile__ (
+ "mov lr, %0\n\t"
+ "mov sp, %3\n\t"
+ "mov %r0, %1\n\t"
+ "mov %r1, %2\n\t"
+ "bx lr\n\t"
+ : /* no output operand */
+ : "r" (EntryPoint),
+ "r" (Context),
+ "r" (Context2),
+ "r" (NewStack)
+ );
+}
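
A note on the GCC extended-asm syntax used above (illustration only, not part of the patch): %0, %1, ... in the template refer to the operands in the order they are listed after the colons, and an "r" constraint lets the compiler choose any core register. A minimal, self-contained example of the same operand-numbering mechanism:

  #include <Base.h>

  //
  // Illustration only: add two values with ARM extended asm. %0 is the output
  // operand (Sum); %1 and %2 are the inputs, all in compiler-chosen registers.
  //
  UINTN
  AsmAddExample (
    UINTN  Augend,
    UINTN  Addend
    )
  {
    UINTN  Sum;

    __asm__ (
      "add %0, %1, %2"
      : "=r" (Sum)
      : "r" (Augend),
        "r" (Addend)
      );

    return Sum;
  }
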
diff --git a/MdePkg/Library/BaseLib/Arm/GetInterruptsState.S b/MdePkg/Library/BaseLib/Arm/GetInterruptsState.S
new file mode 100644
index 0000000000..4103aaaf9b
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/GetInterruptsState.S
@@ -0,0 +1,43 @@
+#------------------------------------------------------------------------------
+#
+# GetInterruptState() function for ARM
+#
+# Copyright (c) 2006 - 2009, Intel Corporation<BR>
+# Portions copyright (c) 2008-2009 Apple Inc.<BR>
+# All rights reserved. This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+.globl _GetInterruptState
+
+#/**
+# Retrieves the current CPU interrupt state.
+#
+# Returns TRUE if interrupts are currently enabled. Otherwise
+# returns FALSE.
+#
+# @retval TRUE CPU interrupts are enabled.
+# @retval FALSE CPU interrupts are disabled.
+#
+#**/
+#
+#BOOLEAN
+#EFIAPI
+#GetInterruptState (
+# VOID
+# );
+#
+_GetInterruptState:
+ mrs R0, CPSR
+ tst R0, #0x80 @Check if IRQ is enabled.
+ moveq R0, #1
+ movne R0, #0
+ bx LR
diff --git a/MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm b/MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm
new file mode 100644
index 0000000000..9516cfc3e1
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm
@@ -0,0 +1,45 @@
+;------------------------------------------------------------------------------
+;
+; GetInterruptState() function for ARM
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT GetInterruptState
+
+ AREA Interrupt_enable, CODE, READONLY
+
+;/**
+; Retrieves the current CPU interrupt state.
+;
+; Returns TRUE if interrupts are currently enabled. Otherwise
+; returns FALSE.
+;
+; @retval TRUE CPU interrupts are enabled.
+; @retval FALSE CPU interrupts are disabled.
+;
+;**/
+;
+;BOOLEAN
+;EFIAPI
+;GetInterruptState (
+; VOID
+; );
+;
+GetInterruptState
+ MRS R0, CPSR
+ TST R0, #0x80 ;Check if IRQ is enabled.
+ MOVEQ R0, #1
+ MOVNE R0, #0
+ BX LR
+
+ END
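
Taken together, GetInterruptState(), DisableInterrupts(), and EnableInterrupts() support the usual save/disable/restore pattern around a critical section. A hedged sketch follows (the body of the critical section is hypothetical; BaseLib also packages this idea as SaveAndDisableInterrupts()/SetInterruptState()):

  #include <Base.h>
  #include <Library/BaseLib.h>

  VOID
  EFIAPI
  DoSomethingUninterrupted (
    VOID
    )
  {
    BOOLEAN  InterruptsEnabled;

    //
    // Remember whether IRQs were enabled, then mask them.
    //
    InterruptsEnabled = GetInterruptState ();
    DisableInterrupts ();

    //
    // ... hypothetical work that must not be interrupted ...
    //

    //
    // Re-enable only if interrupts were enabled on entry.
    //
    if (InterruptsEnabled) {
      EnableInterrupts ();
    }
  }
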
diff --git a/MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c b/MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c
new file mode 100644
index 0000000000..7b6351122f
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c
@@ -0,0 +1,59 @@
+/** @file
+ SwitchStack() function for ARM.
+
+ Copyright (c) 2006 - 2007, Intel Corporation<BR>
+ Portions copyright (c) 2008-2009 Apple Inc.<BR>
+ All rights reserved. This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+ Transfers control to a function starting with a new stack.
+
+ Transfers control to the function specified by EntryPoint using the
+ new stack specified by NewStack and passing in the parameters specified
+ by Context1 and Context2. Context1 and Context2 are optional and may
+ be NULL. The function EntryPoint must never return.
+ Marker will be ignored on IA-32, x64, and EBC.
+ IPF CPUs expect one additional parameter of type VOID * that specifies
+ the new backing store pointer.
+
+ If EntryPoint is NULL, then ASSERT().
+ If NewStack is NULL, then ASSERT().
+
+ @param EntryPoint A pointer to function to call with the new stack.
+ @param Context1 A pointer to the context to pass into the EntryPoint
+ function.
+ @param Context2 A pointer to the context to pass into the EntryPoint
+ function.
+ @param NewStack A pointer to the new stack to use for the EntryPoint
+ function.
+ @param Marker VA_LIST marker for the variable argument list.
+
+**/
+VOID
+EFIAPI
+InternalSwitchStack (
+ IN SWITCH_STACK_ENTRY_POINT EntryPoint,
+ IN VOID *Context1, OPTIONAL
+ IN VOID *Context2, OPTIONAL
+ IN VOID *NewStack,
+ IN VA_LIST Marker
+ )
+
+{
+ //
+ // Stack should be aligned with CPU_STACK_ALIGNMENT
+ //
+ ASSERT (((UINTN)NewStack & (CPU_STACK_ALIGNMENT - 1)) == 0);
+
+ InternalSwitchStackAsm (EntryPoint, Context1, Context2, NewStack);
+}
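
For orientation (not part of the patch): callers reach InternalSwitchStack() through the public SwitchStack() API. A hedged sketch of a caller follows; the entry point, stack buffer, and its size are hypothetical, and the entry point must never return:

  #include <Base.h>
  #include <Library/BaseLib.h>

  #define NEW_STACK_SIZE  0x4000   // hypothetical size, for illustration

  STATIC UINT64  mNewStack[NEW_STACK_SIZE / sizeof (UINT64)];   // UINT64 keeps the buffer 8-byte aligned

  //
  // Hypothetical entry point; Context1/Context2 arrive as its two arguments
  // (r0/r1 on ARM), and it must never return.
  //
  VOID
  EFIAPI
  NewStackEntry (
    IN VOID  *Context1,
    IN VOID  *Context2
    )
  {
    for (;;) {
      CpuPause ();
    }
  }

  VOID
  EFIAPI
  StartOnNewStack (
    VOID
    )
  {
    //
    // The stack grows down, so pass the top of the buffer.
    //
    SwitchStack (NewStackEntry, NULL, NULL, (UINT8 *)mNewStack + NEW_STACK_SIZE);
  }
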
diff --git a/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S b/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S
new file mode 100644
index 0000000000..d79ad2e84b
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S
@@ -0,0 +1,70 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2006 - 2009, Intel Corporation<BR>
+# Portions copyright (c) 2008-2009 Apple Inc.<BR>
+# All rights reserved. This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+.text
+.p2align 2
+
+.globl ASM_PFX(SetJump)
+.globl ASM_PFX(InternalLongJump)
+
+#/**
+# Saves the current CPU context that can be restored with a call to LongJump() and returns 0.
+#
+# Saves the current CPU context in the buffer specified by JumpBuffer and returns 0. The initial
+# call to SetJump() must always return 0. Subsequent calls to LongJump() cause a non-zero
+# value to be returned by SetJump().
+#
+# If JumpBuffer is NULL, then ASSERT().
+# For IPF CPUs, if JumpBuffer is not aligned on a 16-byte boundary, then ASSERT().
+#
+# @param JumpBuffer A pointer to CPU context buffer.
+#
+#**/
+#
+#UINTN
+#EFIAPI
+#SetJump (
+# IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer // R0
+# );
+#
+ASM_PFX(SetJump):
+ mov r3, r13
+ stmia r0, {r3-r12,r14}
+ eor r0, r0, r0
+ bx lr
+
+#/**
+# Restores the CPU context that was saved with SetJump().
+#
+# Restores the CPU context from the buffer specified by JumpBuffer.
+# This function never returns to the caller.
+# Instead it resumes execution based on the state of JumpBuffer.
+#
+# @param JumpBuffer A pointer to CPU context buffer.
+# @param Value The value to return when the SetJump() context is restored.
+#
+#**/
+#VOID
+#EFIAPI
+#InternalLongJump (
+# IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer, // R0
+# IN UINTN Value // R1
+# );
+#
+ASM_PFX(InternalLongJump):
+ ldmia r0, {r3-r12,r14}
+ mov r13, r3
+ mov r0, r1
+ bx lr
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED()
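
For orientation (not part of the patch): the public pair is SetJump()/LongJump(); LongJump() validates its arguments and then lands in InternalLongJump() above, which makes SetJump() appear to return a second time with a non-zero value. A hedged usage sketch with a hypothetical error path:

  #include <Base.h>
  #include <Library/BaseLib.h>

  STATIC BASE_LIBRARY_JUMP_BUFFER  mJumpBuffer;

  //
  // Hypothetical worker that bails out through the jump buffer on failure.
  //
  STATIC
  VOID
  DoRiskyWork (
    VOID
    )
  {
    BOOLEAN  Failed;

    Failed = TRUE;                  // pretend something went wrong
    if (Failed) {
      LongJump (&mJumpBuffer, 1);   // resumes at SetJump(), which now returns 1
    }
  }

  UINTN
  EFIAPI
  RunWithRecovery (
    VOID
    )
  {
    if (SetJump (&mJumpBuffer) == 0) {
      //
      // First return: context saved, run the work.
      //
      DoRiskyWork ();
      return 0;
    }
    //
    // Second return: arrived here via LongJump().
    //
    return 1;
  }
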
diff --git a/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm b/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm
new file mode 100644
index 0000000000..d277742965
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm
@@ -0,0 +1,70 @@
+;------------------------------------------------------------------------------
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT SetJump
+ EXPORT InternalLongJump
+
+ AREA BaseLib, CODE, READONLY
+
+;/**
+; Saves the current CPU context that can be restored with a call to LongJump() and returns 0.
+;
+; Saves the current CPU context in the buffer specified by JumpBuffer and returns 0. The initial
+; call to SetJump() must always return 0. Subsequent calls to LongJump() cause a non-zero
+; value to be returned by SetJump().
+;
+; If JumpBuffer is NULL, then ASSERT().
+; For IPF CPUs, if JumpBuffer is not aligned on a 16-byte boundary, then ASSERT().
+;
+; @param JumpBuffer A pointer to CPU context buffer.
+;
+;**/
+;
+;UINTN
+;EFIAPI
+;SetJump (
+; IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer // R0
+; )
+;
+SetJump
+ MOV R3, R13
+ STM R0, {R3-R12,R14}
+ EOR R0, R0
+ BX LR
+
+;/**
+; Restores the CPU context that was saved with SetJump().
+;
+; Restores the CPU context from the buffer specified by JumpBuffer.
+; This function never returns to the caller.
+; Instead it resumes execution based on the state of JumpBuffer.
+;
+; @param JumpBuffer A pointer to CPU context buffer.
+; @param Value The value to return when the SetJump() context is restored.
+;
+;**/
+;VOID
+;EFIAPI
+;InternalLongJump (
+; IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer, // R0
+; IN UINTN Value // R1
+; );
+;
+InternalLongJump
+ LDM R0, {R3-R12,R14}
+ MOV R13, R3
+ MOV R0, R1
+ BX LR
+
+ END
diff --git a/MdePkg/Library/BaseLib/Arm/SwitchStack.asm b/MdePkg/Library/BaseLib/Arm/SwitchStack.asm
new file mode 100644
index 0000000000..2cc7b57e04
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/SwitchStack.asm
@@ -0,0 +1,45 @@
+;------------------------------------------------------------------------------
+;
+; Copyright (c) 2006 - 2009, Intel Corporation<BR>
+; Portions copyright (c) 2008-2009 Apple Inc.<BR>
+; All rights reserved. This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;------------------------------------------------------------------------------
+
+ EXPORT InternalSwitchStackAsm
+
+ AREA Switch_Stack, CODE, READONLY
+
+;/**
+; This allows the caller to switch the stack and go to the new entry point
+;
+; @param EntryPoint Pointer to the location to enter
+; @param Context Parameter to pass in
+; @param Context2 Parameter2 to pass in
+; @param NewStack New Location of the stack
+;
+; @return Nothing. Goes to the Entry Point passing in the new parameters
+;
+;**/
+;VOID
+;EFIAPI
+;InternalSwitchStackAsm (
+; SWITCH_STACK_ENTRY_POINT EntryPoint,
+; VOID *Context,
+; VOID *Context2,
+; VOID *NewStack
+; );
+;
+InternalSwitchStackAsm
+ MOV LR, R0
+ MOV SP, R3
+ MOV R0, R1
+ MOV R1, R2
+ BX LR
+ END
diff --git a/MdePkg/Library/BaseLib/Arm/Unaligned.c b/MdePkg/Library/BaseLib/Arm/Unaligned.c
new file mode 100644
index 0000000000..76a7c620ab
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Arm/Unaligned.c
@@ -0,0 +1,252 @@
+/** @file
+ Unaligned access functions of BaseLib for ARM.
+
+ volatile was added to work around optimization issues.
+
+ Copyright (c) 2006 - 2009, Intel Corporation<BR>
+ Portions Copyright (c) 2008-2009 Apple Inc.<BR>
+ All rights reserved. This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+ Reads a 16-bit value from memory that may be unaligned.
+
+ This function returns the 16-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 16-bit value that may be unaligned.
+
+ @return The 16-bit value read from Buffer.
+
+**/
+UINT16
+EFIAPI
+ReadUnaligned16 (
+ IN CONST UINT16 *Buffer
+ )
+{
+ volatile UINT8 LowerByte;
+ volatile UINT8 HigherByte;
+
+ ASSERT (Buffer != NULL);
+
+ LowerByte = ((UINT8*)Buffer)[0];
+ HigherByte = ((UINT8*)Buffer)[1];
+
+ return (UINT16)(LowerByte | (HigherByte << 8));
+}
+
+/**
+ Writes a 16-bit value to memory that may be unaligned.
+
+ This function writes the 16-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 16-bit value that may be unaligned.
+ @param Value 16-bit value to write to Buffer.
+
+ @return The 16-bit value to write to Buffer.
+
+**/
+UINT16
+EFIAPI
+WriteUnaligned16 (
+ OUT UINT16 *Buffer,
+ IN UINT16 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ ((volatile UINT8*)Buffer)[0] = (UINT8)Value;
+ ((volatile UINT8*)Buffer)[1] = (UINT8)(Value >> 8);
+
+ return Value;
+}
+
+/**
+ Reads a 24-bit value from memory that may be unaligned.
+
+ This function returns the 24-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 24-bit value that may be unaligned.
+
+ @return The 24-bit value read from Buffer.
+
+**/
+UINT32
+EFIAPI
+ReadUnaligned24 (
+ IN CONST UINT32 *Buffer
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ return (UINT32)(
+ ReadUnaligned16 ((UINT16*)Buffer) |
+ (((UINT8*)Buffer)[2] << 16)
+ );
+}
+
+/**
+ Writes a 24-bit value to memory that may be unaligned.
+
+ This function writes the 24-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 24-bit value that may be unaligned.
+ @param Value 24-bit value to write to Buffer.
+
+ @return The 24-bit value to write to Buffer.
+
+**/
+UINT32
+EFIAPI
+WriteUnaligned24 (
+ OUT UINT32 *Buffer,
+ IN UINT32 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ WriteUnaligned16 ((UINT16*)Buffer, (UINT16)Value);
+ *(UINT8*)((UINT16*)Buffer + 1) = (UINT8)(Value >> 16);
+ return Value;
+}
+
+/**
+ Reads a 32-bit value from memory that may be unaligned.
+
+ This function returns the 32-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 32-bit value that may be unaligned.
+
+ @return The 32-bit value read from Buffer.
+
+**/
+UINT32
+EFIAPI
+ReadUnaligned32 (
+ IN CONST UINT32 *Buffer
+ )
+{
+ UINT16 LowerBytes;
+ UINT16 HigherBytes;
+
+ ASSERT (Buffer != NULL);
+
+ LowerBytes = ReadUnaligned16 ((UINT16*) Buffer);
+ HigherBytes = ReadUnaligned16 ((UINT16*) Buffer + 1);
+
+ return (UINT32) (LowerBytes | (HigherBytes << 16));
+}
+
+/**
+ Writes a 32-bit value to memory that may be unaligned.
+
+ This function writes the 32-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 32-bit value that may be unaligned.
+ @param Value 32-bit value to write to Buffer.
+
+ @return The 32-bit value to write to Buffer.
+
+**/
+UINT32
+EFIAPI
+WriteUnaligned32 (
+ OUT UINT32 *Buffer,
+ IN UINT32 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ WriteUnaligned16 ((UINT16*)Buffer, (UINT16)Value);
+ WriteUnaligned16 ((UINT16*)Buffer + 1, (UINT16)(Value >> 16));
+ return Value;
+}
+
+/**
+ Reads a 64-bit value from memory that may be unaligned.
+
+ This function returns the 64-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 64-bit value that may be unaligned.
+
+ @return The 64-bit value read from Buffer.
+
+**/
+UINT64
+EFIAPI
+ReadUnaligned64 (
+ IN CONST UINT64 *Buffer
+ )
+{
+ UINT32 LowerBytes;
+ UINT32 HigherBytes;
+
+ ASSERT (Buffer != NULL);
+
+ LowerBytes = ReadUnaligned32 ((UINT32*) Buffer);
+ HigherBytes = ReadUnaligned32 ((UINT32*) Buffer + 1);
+
+ return (UINT64) (LowerBytes | LShiftU64 (HigherBytes, 32));
+}
+
+/**
+ Writes a 64-bit value to memory that may be unaligned.
+
+ This function writes the 64-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer Pointer to a 64-bit value that may be unaligned.
+ @param Value 64-bit value to write to Buffer.
+
+ @return The 64-bit value to write to Buffer.
+
+**/
+UINT64
+EFIAPI
+WriteUnaligned64 (
+ OUT UINT64 *Buffer,
+ IN UINT64 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ WriteUnaligned32 ((UINT32*)Buffer, (UINT32)Value);
+ WriteUnaligned32 ((UINT32*)Buffer + 1, (UINT32)RShiftU64 (Value, 32));
+ return Value;
+}
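
As a usage illustration (not part of the patch): these helpers matter on ARM because a plain 32-bit load or store at an odd address can raise an alignment fault. A hedged sketch of reading and writing a little-endian 32-bit field that sits at an unaligned offset; the packet layout is hypothetical:

  #include <Base.h>
  #include <Library/BaseLib.h>

  //
  // Hypothetical wire format: a 1-byte tag followed by a little-endian 32-bit
  // length, so the length field starts at offset 1 and is never aligned.
  //
  UINT32
  GetPacketLength (
    IN CONST UINT8  *Packet
    )
  {
    return ReadUnaligned32 ((CONST UINT32 *)(Packet + 1));
  }

  VOID
  SetPacketLength (
    OUT UINT8   *Packet,
    IN  UINT32  Length
    )
  {
    WriteUnaligned32 ((UINT32 *)(Packet + 1), Length);
  }
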
diff --git a/MdePkg/Library/BaseLib/BaseLib.inf b/MdePkg/Library/BaseLib/BaseLib.inf
index 1cadb67adf..b240364822 100644
--- a/MdePkg/Library/BaseLib/BaseLib.inf
+++ b/MdePkg/Library/BaseLib/BaseLib.inf
@@ -1,7 +1,8 @@
#/** @file
# Base Library implementation.
#
-# Copyright (c) 2007 - 2009, Intel Corporation.
+# Copyright (c) 2007 - 2009, Intel Corporation.<BR>
+# Portions Copyright (c) 2008-2009 Apple Inc.<BR>
#
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -22,7 +23,7 @@
LIBRARY_CLASS = BaseLib
#
-# VALID_ARCHITECTURES = IA32 X64 IPF EBC
+# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM
#
[Sources.common]
@@ -583,6 +584,25 @@
Unaligned.c
Math64.c
+[Sources.ARM]
+ Arm/InternalSwitchStack.c
+ Arm/Unaligned.c
+ Math64.c
+
+ Arm/SwitchStack.asm | RVCT
+ Arm/SetJumpLongJump.asm | RVCT
+ Arm/DisableInterrupts.asm | RVCT
+ Arm/EnableInterrupts.asm | RVCT
+ Arm/GetInterruptsState.asm | RVCT
+ Arm/CpuPause.asm | RVCT
+ Arm/CpuBreakpoint.asm
+
+ Arm/GccInline.c | GCC
+ Arm/EnableInterrupts.S | GCC
+ Arm/DisableInterrupts.S | GCC
+ Arm/SetJumpLongJump.S | GCC
+ Arm/CpuBreakpoint.S | GCC
+
[Packages]
MdePkg/MdePkg.dec
diff --git a/MdePkg/Library/BaseLib/Ia32/GccInline.c b/MdePkg/Library/BaseLib/Ia32/GccInline.c
new file mode 100644
index 0000000000..6118676b4d
--- /dev/null
+++ b/MdePkg/Library/BaseLib/Ia32/GccInline.c
@@ -0,0 +1,1758 @@
+/** @file
+ GCC inline implementation of BaseLib processor specific functions.
+
+ Copyright (c) 2006 - 2007, Intel Corporation<BR>
+ Portions copyright (c) 2008-2009 Apple Inc.<BR>
+ All rights reserved. This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+
+#include "BaseLibInternals.h"
+
+
+
+/**
+ Used to serialize load and store operations.
+
+ All loads and stores that precede calls to this function are guaranteed to be
+ globally visible when this function returns.
+
+**/
+VOID
+EFIAPI
+MemoryFence (
+ VOID
+ )
+{
+ // This is a little bit of overkill; it is more about the compiler than it is
+ // about actual processor synchronization. It is similar to the _ReadWriteBarrier
+ // Microsoft-specific intrinsic.
+ __asm__ __volatile__ ("":::"memory");
+}
+
+
+/**
+ Enables CPU interrupts.
+
+ Enables CPU interrupts.
+
+**/
+VOID
+EFIAPI
+EnableInterrupts (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("sti"::: "memory");
+}
+
+
+/**
+ Disables CPU interrupts.
+
+ Disables CPU interrupts.
+
+**/
+VOID
+EFIAPI
+DisableInterrupts (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("cli"::: "memory");
+}
+
+
+
+
+/**
+ Requests CPU to pause for a short period of time.
+
+ Requests CPU to pause for a short period of time. Typically used in MP
+ systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuPause (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("pause");
+}
+
+
+/**
+ Generates a breakpoint on the CPU.
+
+ Generates a breakpoint on the CPU. The breakpoint must be implemented such
+ that code can resume normal execution after the breakpoint.
+
+**/
+VOID
+EFIAPI
+CpuBreakpoint (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("int $3");
+}
+
+
+
+/**
+ Returns a 64-bit Machine Specific Register(MSR).
+
+ Reads and returns the 64-bit MSR specified by Index. No parameter checking is
+ performed on Index, and some Index values may cause CPU exceptions. The
+ caller must either guarantee that Index is valid, or the caller must set up
+ exception handlers to catch the exceptions. This function is only available
+ on IA-32 and X64.
+
+ @param Index The 32-bit MSR index to read.
+
+ @return The value of the MSR identified by Index.
+
+**/
+UINT64
+EFIAPI
+AsmReadMsr64 (
+ IN UINT32 Index
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "rdmsr"
+ : "=A" (Data) // %0
+ : "c" (Index) // %1
+ );
+
+ return Data;
+}
+
+/**
+ Writes a 64-bit value to a Machine Specific Register(MSR), and returns the
+ value.
+
+ Writes the 64-bit value specified by Value to the MSR specified by Index. The
+ 64-bit value written to the MSR is returned. No parameter checking is
+ performed on Index or Value, and some of these may cause CPU exceptions. The
+ caller must either guarantee that Index and Value are valid, or the caller
+ must establish proper exception handlers. This function is only available on
+ IA-32 and X64.
+
+ @param Index The 32-bit MSR index to write.
+ @param Value The 64-bit value to write to the MSR.
+
+ @return Value
+
+**/
+UINT64
+EFIAPI
+AsmWriteMsr64 (
+ IN UINT32 Index,
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "wrmsr"
+ :
+ : "c" (Index),
+ "A" (Value)
+ );
+
+ return Value;
+}
+
+
+
+/**
+ Reads the current value of the EFLAGS register.
+
+ Reads and returns the current value of the EFLAGS register. This function is
+ only available on IA-32 and X64. This returns a 32-bit value on IA-32 and a
+ 64-bit value on X64.
+
+ @return EFLAGS on IA-32 or RFLAGS on X64.
+
+**/
+UINTN
+EFIAPI
+AsmReadEflags (
+ VOID
+ )
+{
+ UINTN Eflags;
+
+ __asm__ __volatile__ (
+ "pushfl \n\t"
+ "popl %0 "
+ : "=r" (Eflags)
+ );
+
+ return Eflags;
+}
+
+
+
+/**
+ Reads the current value of the Control Register 0 (CR0).
+
+ Reads and returns the current value of CR0. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 0 (CR0).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr0 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%cr0,%0"
+ : "=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of the Control Register 2 (CR2).
+
+ Reads and returns the current value of CR2. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 2 (CR2).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr2 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%cr2, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+/**
+ Reads the current value of the Control Register 3 (CR3).
+
+ Reads and returns the current value of CR3. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 3 (CR3).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr3 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%cr3, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of the Control Register 4 (CR4).
+
+ Reads and returns the current value of CR4. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 4 (CR4).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr4 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%cr4, %0"
+ : "=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes a value to Control Register 0 (CR0).
+
+ Writes and returns a new value to CR0. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr0 The value to write to CR0.
+
+ @return The value written to CR0.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr0 (
+ UINTN Cr0
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%cr0"
+ :
+ : "r" (Cr0)
+ );
+ return Cr0;
+}
+
+
+/**
+ Writes a value to Control Register 2 (CR2).
+
+ Writes and returns a new value to CR2. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr2 The value to write to CR2.
+
+ @return The value written to CR2.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr2 (
+ UINTN Cr2
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%cr2"
+ :
+ : "r" (Cr2)
+ );
+ return Cr2;
+}
+
+
+/**
+ Writes a value to Control Register 3 (CR3).
+
+ Writes and returns a new value to CR3. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr3 The value to write to CR3.
+
+ @return The value written to CR3.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr3 (
+ UINTN Cr3
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%cr3"
+ :
+ : "r" (Cr3)
+ );
+ return Cr3;
+}
+
+
+/**
+ Writes a value to Control Register 4 (CR4).
+
+ Writes and returns a new value to CR4. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr4 The value to write to CR4.
+
+ @return The value written to CR4.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr4 (
+ UINTN Cr4
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%cr4"
+ :
+ : "r" (Cr4)
+ );
+ return Cr4;
+}
+
+
+/**
+ Reads the current value of Debug Register 0 (DR0).
+
+ Reads and returns the current value of DR0. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 0 (DR0).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr0 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr0, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 1 (DR1).
+
+ Reads and returns the current value of DR1. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 1 (DR1).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr1 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr1, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 2 (DR2).
+
+ Reads and returns the current value of DR2. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 2 (DR2).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr2 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr2, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 3 (DR3).
+
+ Reads and returns the current value of DR3. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 3 (DR3).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr3 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr3, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 4 (DR4).
+
+ Reads and returns the current value of DR4. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 4 (DR4).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr4 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr4, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 5 (DR5).
+
+ Reads and returns the current value of DR5. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 5 (DR5).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr5 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr5, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 6 (DR6).
+
+ Reads and returns the current value of DR6. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 6 (DR6).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr6 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr6, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 7 (DR7).
+
+ Reads and returns the current value of DR7. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 7 (DR7).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr7 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "movl %%dr7, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes a value to Debug Register 0 (DR0).
+
+ Writes and returns a new value to DR0. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr0 The value to write to Dr0.
+
+ @return The value written to Debug Register 0 (DR0).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr0 (
+ UINTN Dr0
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr0"
+ :
+ : "r" (Dr0)
+ );
+ return Dr0;
+}
+
+
+/**
+ Writes a value to Debug Register 1 (DR1).
+
+ Writes and returns a new value to DR1. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr1 The value to write to Dr1.
+
+ @return The value written to Debug Register 1 (DR1).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr1 (
+ UINTN Dr1
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr1"
+ :
+ : "r" (Dr1)
+ );
+ return Dr1;
+}
+
+
+/**
+ Writes a value to Debug Register 2 (DR2).
+
+ Writes and returns a new value to DR2. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr2 The value to write to Dr2.
+
+ @return The value written to Debug Register 2 (DR2).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr2 (
+ UINTN Dr2
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr2"
+ :
+ : "r" (Dr2)
+ );
+ return Dr2;
+}
+
+
+/**
+ Writes a value to Debug Register 3 (DR3).
+
+ Writes and returns a new value to DR3. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr3 The value to write to Dr3.
+
+ @return The value written to Debug Register 3 (DR3).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr3 (
+ UINTN Dr3
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr3"
+ :
+ : "r" (Dr3)
+ );
+ return Dr3;
+}
+
+
+/**
+ Writes a value to Debug Register 4 (DR4).
+
+ Writes and returns a new value to DR4. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr4 The value to write to Dr4.
+
+ @return The value written to Debug Register 4 (DR4).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr4 (
+ UINTN Dr4
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr4"
+ :
+ : "r" (Dr4)
+ );
+ return Dr4;
+}
+
+
+/**
+ Writes a value to Debug Register 5 (DR5).
+
+ Writes and returns a new value to DR5. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr5 The value to write to Dr5.
+
+ @return The value written to Debug Register 5 (DR5).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr5 (
+ UINTN Dr5
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr5"
+ :
+ : "r" (Dr5)
+ );
+ return Dr5;
+}
+
+
+/**
+ Writes a value to Debug Register 6 (DR6).
+
+ Writes and returns a new value to DR6. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr6 The value to write to Dr6.
+
+ @return The value written to Debug Register 6 (DR6).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr6 (
+ UINTN Dr6
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr6"
+ :
+ : "r" (Dr6)
+ );
+ return Dr6;
+}
+
+
+/**
+ Writes a value to Debug Register 7 (DR7).
+
+ Writes and returns a new value to DR7. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr7 The value to write to Dr7.
+
+ @return The value written to Debug Register 7 (DR7).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr7 (
+ UINTN Dr7
+ )
+{
+ __asm__ __volatile__ (
+ "movl %0, %%dr7"
+ :
+ : "r" (Dr7)
+ );
+ return Dr7;
+}
+
+
+/**
+ Reads the current value of Code Segment Register (CS).
+
+ Reads and returns the current value of CS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of CS.
+
+**/
+UINT16
+EFIAPI
+AsmReadCs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%cs, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Data Segment Register (DS).
+
+ Reads and returns the current value of DS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of DS.
+
+**/
+UINT16
+EFIAPI
+AsmReadDs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%ds, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Extra Segment Register (ES).
+
+ Reads and returns the current value of ES. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of ES.
+
+**/
+UINT16
+EFIAPI
+AsmReadEs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%es, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of FS Data Segment Register (FS).
+
+ Reads and returns the current value of FS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of FS.
+
+**/
+UINT16
+EFIAPI
+AsmReadFs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%fs, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of GS Data Segment Register (GS).
+
+ Reads and returns the current value of GS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of GS.
+
+**/
+UINT16
+EFIAPI
+AsmReadGs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%gs, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Stack Segment Register (SS).
+
+ Reads and returns the current value of SS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of SS.
+
+**/
+UINT16
+EFIAPI
+AsmReadSs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%ds, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Task Register (TR).
+
+ Reads and returns the current value of TR. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of TR.
+
+**/
+UINT16
+EFIAPI
+AsmReadTr (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "str %0"
+ : "=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current Global Descriptor Table Register(GDTR) descriptor.
+
+ Reads the current GDTR descriptor and returns it in Gdtr. This
+ function is only available on IA-32 and X64.
+
+ @param Gdtr Pointer to a GDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86ReadGdtr (
+ OUT IA32_DESCRIPTOR *Gdtr
+ )
+{
+ __asm__ __volatile__ (
+ "sgdt %0"
+ : "=m" (*Gdtr)
+ );
+}
+
+
+/**
+ Writes the current Global Descriptor Table Register (GDTR) descriptor.
+
+ Writes the GDTR descriptor specified by Gdtr. This function is
+ only available on IA-32 and X64.
+
+ @param Gdtr Pointer to a GDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86WriteGdtr (
+ IN CONST IA32_DESCRIPTOR *Gdtr
+ )
+{
+ __asm__ __volatile__ (
+ "lgdt %0"
+ :
+ : "m" (*Gdtr)
+ );
+
+}
+
+
+/**
+ Reads the current Interrupt Descriptor Table Register (IDTR) descriptor.
+
+ Reads the current IDTR descriptor and returns it in Idtr. This
+ function is only available on IA-32 and X64.
+
+ @param Idtr Pointer to an IDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86ReadIdtr (
+ OUT IA32_DESCRIPTOR *Idtr
+ )
+{
+ __asm__ __volatile__ (
+ "sidt %0"
+ : "=m" (*Idtr)
+ );
+}
+
+
+/**
+ Writes the current Interrupt Descriptor Table Register (IDTR) descriptor.
+
+ Writes the IDTR descriptor specified by Idtr. This function is
+ only available on IA-32 and X64.
+
+ @param Idtr Pointer to an IDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86WriteIdtr (
+ IN CONST IA32_DESCRIPTOR *Idtr
+ )
+{
+ __asm__ __volatile__ (
+ "lidt %0"
+ :
+ : "m" (*Ldtr)
+ );
+}
+
+
+/**
+ Reads the current Local Descriptor Table Register(LDTR) selector.
+
+ Reads and returns the current 16-bit LDTR descriptor value. This function is
+ only available on IA-32 and X64.
+
+ @return The current selector of LDT.
+
+**/
+UINT16
+EFIAPI
+AsmReadLdtr (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "sldt %0"
+ : "=g" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes the current Local Descriptor Table Register (LDTR) selector.
+
+ Writes the LDTR selector specified by Ldtr. This function is
+ only available on IA-32 and X64.
+
+ @param Ldtr 16-bit LDTR selector value.
+
+**/
+VOID
+EFIAPI
+AsmWriteLdtr (
+ IN UINT16 Ldtr
+ )
+{
+ __asm__ __volatile__ (
+ "lldtw %0"
+ :
+ : "g" (Ldtr) // %0
+ );
+}
+
+
+/**
+ Save the current floating point/SSE/SSE2 context to a buffer.
+
+ Saves the current floating point/SSE/SSE2 state to the buffer specified by
+ Buffer. Buffer must be aligned on a 16-byte boundary. This function is only
+ available on IA-32 and X64.
+
+ @param Buffer Pointer to a buffer to save the floating point/SSE/SSE2 context.
+
+**/
+VOID
+EFIAPI
+InternalX86FxSave (
+ OUT IA32_FX_BUFFER *Buffer
+ )
+{
+ __asm__ __volatile__ (
+ "fxsave %0"
+ :
+ : "m" (*Buffer) // %0
+ );
+}
+
+
+/**
+ Restores the current floating point/SSE/SSE2 context from a buffer.
+
+ Restores the current floating point/SSE/SSE2 state from the buffer specified
+ by Buffer. Buffer must be aligned on a 16-byte boundary. This function is
+ only available on IA-32 and X64.
+
+ @param Buffer Pointer to a buffer to save the floating point/SSE/SSE2 context.
+
+**/
+VOID
+EFIAPI
+InternalX86FxRestore (
+ IN CONST IA32_FX_BUFFER *Buffer
+ )
+{
+ __asm__ __volatile__ (
+ "fxrstor %0"
+ :
+ : "m" (*Buffer) // %0
+ );
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #0 (MM0).
+
+ Reads and returns the current value of MM0. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM0.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm0 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm0, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #1 (MM1).
+
+ Reads and returns the current value of MM1. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM1.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm1 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm1, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #2 (MM2).
+
+ Reads and returns the current value of MM2. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM2.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm2 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm2, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #3 (MM3).
+
+ Reads and returns the current value of MM3. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM3.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm3 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm3, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #4 (MM4).
+
+ Reads and returns the current value of MM4. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM4.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm4 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm4, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #5 (MM5).
+
+ Reads and returns the current value of MM5. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM5.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm5 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm5, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #6 (MM6).
+
+ Reads and returns the current value of MM6. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM6.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm6 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm6, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #7 (MM7).
+
+ Reads and returns the current value of MM7. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM7.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm7 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "push %%eax \n\t"
+ "push %%eax \n\t"
+ "movq %%mm7, (%%esp)\n\t"
+ "pop %%eax \n\t"
+ "pop %%edx \n\t"
+ : "=A" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #0 (MM0).
+
+ Writes the current value of MM0. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM0.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm0 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm0" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #1 (MM1).
+
+ Writes the current value of MM1. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM1.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm1 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm1" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #2 (MM2).
+
+ Writes the current value of MM2. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM2.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm2 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm2" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #3 (MM3).
+
+ Writes the current value of MM3. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM3.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm3 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm3" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #4 (MM4).
+
+ Writes the current value of MM4. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM4.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm4 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm4" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #5 (MM5).
+
+ Writes the current value of MM5. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM5.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm5 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm5" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #6 (MM6).
+
+ Writes the current value of MM6. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM6.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm6 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm6" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #7 (MM7).
+
+ Writes the current value of MM7. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM7.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm7 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movq %0, %%mm7" // %0
+ :
+ : "m" (Value)
+ );
+}
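+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): the
+// MMX accessors above can be paired for a simple round trip. Note that the
+// MMX registers alias the x87 register file, so callers must manage the
+// floating point state themselves.
+//
+//   UINT64  Scratch;
+//
+//   AsmWriteMm0 (0x1122334455667788ULL);
+//   Scratch = AsmReadMm0 ();              // Scratch == 0x1122334455667788
+//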
+
+
+/**
+ Reads the current value of Time Stamp Counter (TSC).
+
+ Reads and returns the current value of TSC. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of TSC
+
+**/
+UINT64
+EFIAPI
+AsmReadTsc (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "rdtsc"
+ : "=A" (Data)
+ );
+
+ return Data;
+}
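+
+//
+// Illustrative usage (editor's sketch, not part of the original patch):
+// AsmReadTsc() can be sampled twice to estimate the cycle cost of a code
+// sequence; the delta is only meaningful if the TSC rate is constant.
+//
+//   UINT64  Start;
+//   UINT64  Delta;
+//
+//   Start = AsmReadTsc ();
+//   // ... code under measurement ...
+//   Delta = AsmReadTsc () - Start;
+//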
+
+
+/**
+ Reads the current value of a Performance Counter (PMC).
+
+ Reads and returns the current value of performance counter specified by
+ Index. This function is only available on IA-32 and X64.
+
+ @param Index The 32-bit Performance Counter index to read.
+
+ @return The value of the PMC specified by Index.
+
+**/
+UINT64
+EFIAPI
+AsmReadPmc (
+ IN UINT32 Index
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "rdpmc"
+ : "=A" (Data)
+ : "c" (Index)
+ );
+
+ return Data;
+}
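+
+//
+// Illustrative usage (editor's sketch, not part of the original patch):
+// RDPMC returns the performance counter selected by Index. The counter must
+// already be programmed, and RDPMC faults unless it is executed at CPL 0 or
+// CR4.PCE is set.
+//
+//   UINT64  Count;
+//
+//   Count = AsmReadPmc (0);   // general-purpose counter 0
+//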
+
+
+
+
+/**
+ Executes a WBINVD instruction.
+
+ Executes a WBINVD instruction. This function is only available on IA-32 and
+ X64.
+
+**/
+VOID
+EFIAPI
+AsmWbinvd (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("wbinvd":::"memory");
+}
+
+
+/**
+  Executes an INVD instruction.
+
+  Executes an INVD instruction. This function is only available on IA-32 and
+ X64.
+
+**/
+VOID
+EFIAPI
+AsmInvd (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("invd":::"memory");
+
+}
+
+
+/**
+ Flushes a cache line from all the instruction and data caches within the
+ coherency domain of the CPU.
+
+  Flushes the cache line specified by LinearAddress, and returns LinearAddress.
+ This function is only available on IA-32 and X64.
+
+ @param LinearAddress The address of the cache line to flush. If the CPU is
+ in a physical addressing mode, then LinearAddress is a
+ physical address. If the CPU is in a virtual
+ addressing mode, then LinearAddress is a virtual
+ address.
+
+ @return LinearAddress
+**/
+VOID *
+EFIAPI
+AsmFlushCacheLine (
+ IN VOID *LinearAddress
+ )
+{
+ __asm__ __volatile__ (
+ "clflush (%0)"
+ : "+a" (LinearAddress)
+ :
+ : "memory"
+ );
+
+ return LinearAddress;
+}
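+
+//
+// Illustrative usage (editor's sketch, not part of the original patch):
+// flushing a whole region one cache line at a time. Buffer and Length are
+// hypothetical names for the region, and a 64-byte line size is assumed
+// purely for the example.
+//
+//   UINT8  *Ptr;
+//
+//   for (Ptr = Buffer; Ptr < Buffer + Length; Ptr += 64) {
+//     AsmFlushCacheLine (Ptr);
+//   }
+//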
+
+
diff --git a/MdePkg/Library/BaseLib/X64/GccInline.c b/MdePkg/Library/BaseLib/X64/GccInline.c
new file mode 100644
index 0000000000..3af0625b36
--- /dev/null
+++ b/MdePkg/Library/BaseLib/X64/GccInline.c
@@ -0,0 +1,1806 @@
+/** @file
+ GCC inline implementation of BaseLib processor specific functions.
+
+ Copyright (c) 2006 - 2007, Intel Corporation<BR>
+ Portions copyright (c) 2008-2009 Apple Inc.<BR>
+ All rights reserved. This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+
+#include "BaseLibInternals.h"
+
+
+
+
+/**
+ Used to serialize load and store operations.
+
+  All loads and stores that precede calls to this function are guaranteed to be
+ globally visible when this function returns.
+
+**/
+VOID
+EFIAPI
+MemoryFence (
+ VOID
+ )
+{
+  // This is a bit of overkill, and it is more about the compiler than actual
+  // processor synchronization. It is similar to the Microsoft-specific
+  // _ReadWriteBarrier intrinsic.
+ __asm__ __volatile__ ("":::"memory");
+}
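+
+//
+// Illustrative usage (editor's sketch, not part of the original patch):
+// publish data before setting a flag polled by another agent. As implemented
+// above, MemoryFence() only constrains the compiler; it emits no fencing
+// instruction. SharedBuffer is a hypothetical shared structure.
+//
+//   SharedBuffer->Data  = Value;
+//   MemoryFence ();
+//   SharedBuffer->Ready = TRUE;
+//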
+
+
+/**
+ Enables CPU interrupts.
+
+ Enables CPU interrupts.
+
+**/
+VOID
+EFIAPI
+EnableInterrupts (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("sti"::: "memory");
+}
+
+
+/**
+ Disables CPU interrupts.
+
+ Disables CPU interrupts.
+
+**/
+VOID
+EFIAPI
+DisableInterrupts (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("cli"::: "memory");
+}
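+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): a
+// simple critical section. BaseLib also provides SaveAndDisableInterrupts()
+// and SetInterruptState() when the previous interrupt state must be restored
+// rather than unconditionally re-enabled.
+//
+//   DisableInterrupts ();
+//   // ... touch state shared with an interrupt handler ...
+//   EnableInterrupts ();
+//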
+
+
+
+
+/**
+ Requests CPU to pause for a short period of time.
+
+ Requests CPU to pause for a short period of time. Typically used in MP
+ systems to prevent memory starvation while waiting for a spin lock.
+
+**/
+VOID
+EFIAPI
+CpuPause (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("pause");
+}
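+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): a
+// polite spin-wait on a flag updated by another processor. Flag is a
+// hypothetical pointer, assumed to be declared volatile.
+//
+//   while (*Flag == 0) {
+//     CpuPause ();
+//   }
+//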
+
+
+/**
+ Generates a breakpoint on the CPU.
+
+ Generates a breakpoint on the CPU. The breakpoint must be implemented such
+ that code can resume normal execution after the breakpoint.
+
+**/
+VOID
+EFIAPI
+CpuBreakpoint (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("int $3");
+}
+
+
+
+/**
+  Returns a 64-bit Machine Specific Register (MSR).
+
+ Reads and returns the 64-bit MSR specified by Index. No parameter checking is
+ performed on Index, and some Index values may cause CPU exceptions. The
+ caller must either guarantee that Index is valid, or the caller must set up
+ exception handlers to catch the exceptions. This function is only available
+ on IA-32 and X64.
+
+ @param Index The 32-bit MSR index to read.
+
+ @return The value of the MSR identified by Index.
+
+**/
+UINT64
+EFIAPI
+AsmReadMsr64 (
+ IN UINT32 Index
+ )
+{
+ UINT32 LowData;
+ UINT32 HighData;
+
+ __asm__ __volatile__ (
+ "rdmsr"
+ : "=a" (LowData), // %0
+ "=d" (HighData) // %1
+ : "c" (Index) // %2
+ );
+
+ return (((UINT64)HighData) << 32) | LowData;
+}
+
+/**
+  Writes a 64-bit value to a Machine Specific Register (MSR), and returns the
+ value.
+
+ Writes the 64-bit value specified by Value to the MSR specified by Index. The
+ 64-bit value written to the MSR is returned. No parameter checking is
+ performed on Index or Value, and some of these may cause CPU exceptions. The
+ caller must either guarantee that Index and Value are valid, or the caller
+ must establish proper exception handlers. This function is only available on
+ IA-32 and X64.
+
+ @param Index The 32-bit MSR index to write.
+ @param Value The 64-bit value to write to the MSR.
+
+ @return Value
+
+**/
+UINT64
+EFIAPI
+AsmWriteMsr64 (
+ IN UINT32 Index,
+ IN UINT64 Value
+ )
+{
+ UINT32 LowData;
+ UINT32 HighData;
+
+ LowData = (UINT32)(Value);
+ HighData = (UINT32)(Value >> 32);
+
+ __asm__ __volatile__ (
+ "wrmsr"
+ :
+ : "c" (Index),
+ "a" (LowData),
+ "d" (HighData)
+ );
+
+ return Value;
+}
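+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): a
+// read-modify-write of an MSR built from AsmReadMsr64()/AsmWriteMsr64().
+// MSR index 0x1B (IA32_APIC_BASE) is used here purely as an example; bit 11
+// is its APIC global enable bit.
+//
+//   UINT64  ApicBase;
+//
+//   ApicBase = AsmReadMsr64 (0x1B);
+//   AsmWriteMsr64 (0x1B, ApicBase | BIT11);
+//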
+
+
+
+/**
+ Reads the current value of the EFLAGS register.
+
+ Reads and returns the current value of the EFLAGS register. This function is
+ only available on IA-32 and X64. This returns a 32-bit value on IA-32 and a
+ 64-bit value on X64.
+
+ @return EFLAGS on IA-32 or RFLAGS on X64.
+
+**/
+UINTN
+EFIAPI
+AsmReadEflags (
+ VOID
+ )
+{
+ UINTN Eflags;
+
+ __asm__ __volatile__ (
+ "pushfq \n\t"
+ "pop %0 "
+ : "=r" (Eflags) // %0
+ );
+
+ return Eflags;
+}
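+
+//
+// Illustrative usage (editor's sketch, not part of the original patch):
+// testing the Interrupt Enable Flag (bit 9 of EFLAGS/RFLAGS) to decide
+// whether interrupts are currently enabled; BaseLib's GetInterruptState()
+// exposes the same test as a public API.
+//
+//   BOOLEAN  InterruptsEnabled;
+//
+//   InterruptsEnabled = (BOOLEAN)((AsmReadEflags () & BIT9) != 0);
+//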
+
+
+
+/**
+ Reads the current value of the Control Register 0 (CR0).
+
+ Reads and returns the current value of CR0. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 0 (CR0).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr0 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%cr0,%0"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of the Control Register 2 (CR2).
+
+ Reads and returns the current value of CR2. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 2 (CR2).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr2 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%cr2, %0"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+/**
+ Reads the current value of the Control Register 3 (CR3).
+
+ Reads and returns the current value of CR3. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 3 (CR3).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr3 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%cr3, %0"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of the Control Register 4 (CR4).
+
+ Reads and returns the current value of CR4. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of the Control Register 4 (CR4).
+
+**/
+UINTN
+EFIAPI
+AsmReadCr4 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%cr4, %0"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes a value to Control Register 0 (CR0).
+
+ Writes and returns a new value to CR0. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr0 The value to write to CR0.
+
+ @return The value written to CR0.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr0 (
+ UINTN Cr0
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%cr0"
+ :
+ : "r" (Cr0)
+ );
+ return Cr0;
+}
+
+
+/**
+ Writes a value to Control Register 2 (CR2).
+
+ Writes and returns a new value to CR2. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr2 The value to write to CR2.
+
+ @return The value written to CR2.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr2 (
+ UINTN Cr2
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%cr2"
+ :
+ : "r" (Cr2)
+ );
+ return Cr2;
+}
+
+
+/**
+ Writes a value to Control Register 3 (CR3).
+
+ Writes and returns a new value to CR3. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr3 The value to write to CR3.
+
+ @return The value written to CR3.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr3 (
+ UINTN Cr3
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%cr3"
+ :
+ : "r" (Cr3)
+ );
+ return Cr3;
+}
+
+
+/**
+ Writes a value to Control Register 4 (CR4).
+
+ Writes and returns a new value to CR4. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Cr4 The value to write to CR4.
+
+ @return The value written to CR4.
+
+**/
+UINTN
+EFIAPI
+AsmWriteCr4 (
+ UINTN Cr4
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%cr4"
+ :
+ : "r" (Cr4)
+ );
+ return Cr4;
+}
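+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): the
+// control register accessors compose naturally, for example to test whether
+// paging is enabled (CR0 bit 31, the PG flag).
+//
+//   BOOLEAN  PagingEnabled;
+//
+//   PagingEnabled = (BOOLEAN)((AsmReadCr0 () & BIT31) != 0);
+//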
+
+
+/**
+ Reads the current value of Debug Register 0 (DR0).
+
+ Reads and returns the current value of DR0. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 0 (DR0).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr0 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr0, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 1 (DR1).
+
+ Reads and returns the current value of DR1. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 1 (DR1).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr1 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr1, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 2 (DR2).
+
+ Reads and returns the current value of DR2. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 2 (DR2).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr2 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr2, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 3 (DR3).
+
+ Reads and returns the current value of DR3. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 3 (DR3).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr3 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr3, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 4 (DR4).
+
+ Reads and returns the current value of DR4. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 4 (DR4).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr4 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr4, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 5 (DR5).
+
+ Reads and returns the current value of DR5. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 5 (DR5).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr5 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr5, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 6 (DR6).
+
+ Reads and returns the current value of DR6. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 6 (DR6).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr6 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr6, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Debug Register 7 (DR7).
+
+ Reads and returns the current value of DR7. This function is only available
+ on IA-32 and X64. This returns a 32-bit value on IA-32 and a 64-bit value on
+ X64.
+
+ @return The value of Debug Register 7 (DR7).
+
+**/
+UINTN
+EFIAPI
+AsmReadDr7 (
+ VOID
+ )
+{
+ UINTN Data;
+
+ __asm__ __volatile__ (
+ "mov %%dr7, %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes a value to Debug Register 0 (DR0).
+
+ Writes and returns a new value to DR0. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr0 The value to write to Dr0.
+
+ @return The value written to Debug Register 0 (DR0).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr0 (
+ UINTN Dr0
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr0"
+ :
+ : "r" (Dr0)
+ );
+ return Dr0;
+}
+
+
+/**
+ Writes a value to Debug Register 1 (DR1).
+
+ Writes and returns a new value to DR1. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr1 The value to write to Dr1.
+
+ @return The value written to Debug Register 1 (DR1).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr1 (
+ UINTN Dr1
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr1"
+ :
+ : "r" (Dr1)
+ );
+ return Dr1;
+}
+
+
+/**
+ Writes a value to Debug Register 2 (DR2).
+
+ Writes and returns a new value to DR2. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr2 The value to write to Dr2.
+
+ @return The value written to Debug Register 2 (DR2).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr2 (
+ UINTN Dr2
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr2"
+ :
+ : "r" (Dr2)
+ );
+ return Dr2;
+}
+
+
+/**
+ Writes a value to Debug Register 3 (DR3).
+
+ Writes and returns a new value to DR3. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr3 The value to write to Dr3.
+
+ @return The value written to Debug Register 3 (DR3).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr3 (
+ UINTN Dr3
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr3"
+ :
+ : "r" (Dr3)
+ );
+ return Dr3;
+}
+
+
+/**
+ Writes a value to Debug Register 4 (DR4).
+
+ Writes and returns a new value to DR4. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr4 The value to write to Dr4.
+
+ @return The value written to Debug Register 4 (DR4).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr4 (
+ UINTN Dr4
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr4"
+ :
+ : "r" (Dr4)
+ );
+ return Dr4;
+}
+
+
+/**
+ Writes a value to Debug Register 5 (DR5).
+
+ Writes and returns a new value to DR5. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr5 The value to write to Dr5.
+
+ @return The value written to Debug Register 5 (DR5).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr5 (
+ UINTN Dr5
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr5"
+ :
+ : "r" (Dr5)
+ );
+ return Dr5;
+}
+
+
+/**
+ Writes a value to Debug Register 6 (DR6).
+
+ Writes and returns a new value to DR6. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr6 The value to write to Dr6.
+
+ @return The value written to Debug Register 6 (DR6).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr6 (
+ UINTN Dr6
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr6"
+ :
+ : "r" (Dr6)
+ );
+ return Dr6;
+}
+
+
+/**
+ Writes a value to Debug Register 7 (DR7).
+
+ Writes and returns a new value to DR7. This function is only available on
+ IA-32 and X64. This writes a 32-bit value on IA-32 and a 64-bit value on X64.
+
+ @param Dr7 The value to write to Dr7.
+
+ @return The value written to Debug Register 7 (DR7).
+
+**/
+UINTN
+EFIAPI
+AsmWriteDr7 (
+ UINTN Dr7
+ )
+{
+ __asm__ __volatile__ (
+ "mov %0, %%dr7"
+ :
+ : "r" (Dr7)
+ );
+ return Dr7;
+}
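+
+//
+// Illustrative usage (editor's sketch, not part of the original patch):
+// arming hardware breakpoint 0 on an address. DR7 bit 0 (L0) locally enables
+// DR0; with the R/W0 and LEN0 fields left at zero this is an instruction
+// execution breakpoint. TargetAddress is a hypothetical pointer.
+//
+//   AsmWriteDr0 ((UINTN)TargetAddress);
+//   AsmWriteDr7 (AsmReadDr7 () | BIT0);
+//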
+
+
+/**
+ Reads the current value of Code Segment Register (CS).
+
+ Reads and returns the current value of CS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of CS.
+
+**/
+UINT16
+EFIAPI
+AsmReadCs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%cs, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Data Segment Register (DS).
+
+ Reads and returns the current value of DS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of DS.
+
+**/
+UINT16
+EFIAPI
+AsmReadDs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%ds, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Extra Segment Register (ES).
+
+ Reads and returns the current value of ES. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of ES.
+
+**/
+UINT16
+EFIAPI
+AsmReadEs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%es, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of FS Data Segment Register (FS).
+
+ Reads and returns the current value of FS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of FS.
+
+**/
+UINT16
+EFIAPI
+AsmReadFs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%fs, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of GS Data Segment Register (GS).
+
+ Reads and returns the current value of GS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of GS.
+
+**/
+UINT16
+EFIAPI
+AsmReadGs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%gs, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Stack Segment Register (SS).
+
+ Reads and returns the current value of SS. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of SS.
+
+**/
+UINT16
+EFIAPI
+AsmReadSs (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "mov %%ds, %0"
+ :"=a" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of Task Register (TR).
+
+ Reads and returns the current value of TR. This function is only available on
+ IA-32 and X64.
+
+ @return The current value of TR.
+
+**/
+UINT16
+EFIAPI
+AsmReadTr (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "str %0"
+ : "=r" (Data)
+ );
+
+ return Data;
+}
+
+
+/**
+  Reads the current Global Descriptor Table Register (GDTR) descriptor.
+
+  Reads the current GDTR descriptor and returns it in Gdtr. This
+ function is only available on IA-32 and X64.
+
+ @param Gdtr Pointer to a GDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86ReadGdtr (
+ OUT IA32_DESCRIPTOR *Gdtr
+ )
+{
+ __asm__ __volatile__ (
+ "sgdt %0"
+ : "=m" (*Gdtr)
+ );
+}
+
+
+/**
+ Writes the current Global Descriptor Table Register (GDTR) descriptor.
+
+  Writes the current GDTR descriptor specified by Gdtr. This function is
+ only available on IA-32 and X64.
+
+ @param Gdtr Pointer to a GDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86WriteGdtr (
+ IN CONST IA32_DESCRIPTOR *Gdtr
+ )
+{
+ __asm__ __volatile__ (
+ "lgdt %0"
+ :
+ : "m" (*Gdtr)
+ );
+
+}
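+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): these
+// internal helpers back the public AsmReadGdtr()/AsmWriteGdtr() APIs, which a
+// caller would typically use to capture and later restore the GDTR.
+//
+//   IA32_DESCRIPTOR  Gdtr;
+//
+//   AsmReadGdtr (&Gdtr);
+//   // ... switch descriptor tables, do work ...
+//   AsmWriteGdtr (&Gdtr);
+//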
+
+
+/**
+  Reads the current Interrupt Descriptor Table Register (IDTR) descriptor.
+
+  Reads the current IDTR descriptor and returns it in Idtr. This
+  function is only available on IA-32 and X64.
+
+  @param Idtr Pointer to an IDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86ReadIdtr (
+  OUT IA32_DESCRIPTOR *Idtr
+  )
+{
+  __asm__ __volatile__ (
+    "sidt %0"
+    : "=m" (*Idtr)
+    );
+}
+
+
+/**
+  Writes the current Interrupt Descriptor Table Register (IDTR) descriptor.
+
+  Writes the current IDTR descriptor specified by Idtr. This function is
+  only available on IA-32 and X64.
+
+  @param Idtr Pointer to an IDTR descriptor.
+
+**/
+VOID
+EFIAPI
+InternalX86WriteIdtr (
+  IN CONST IA32_DESCRIPTOR *Idtr
+  )
+{
+  __asm__ __volatile__ (
+    "lidt %0"
+    :
+    : "m" (*Idtr)
+ );
+}
+
+
+/**
+  Reads the current Local Descriptor Table Register (LDTR) selector.
+
+  Reads and returns the current 16-bit LDTR selector value. This function is
+ only available on IA-32 and X64.
+
+ @return The current selector of LDT.
+
+**/
+UINT16
+EFIAPI
+AsmReadLdtr (
+ VOID
+ )
+{
+ UINT16 Data;
+
+ __asm__ __volatile__ (
+ "sldt %0"
+ : "=g" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+  Writes the current Local Descriptor Table Register (LDTR) selector.
+
+  Writes the current LDTR selector specified by Ldtr. This function is
+ only available on IA-32 and X64.
+
+ @param Ldtr 16-bit LDTR selector value.
+
+**/
+VOID
+EFIAPI
+AsmWriteLdtr (
+ IN UINT16 Ldtr
+ )
+{
+ __asm__ __volatile__ (
+ "lldtw %0"
+ :
+ : "g" (Ldtr) // %0
+ );
+}
+
+
+/**
+ Save the current floating point/SSE/SSE2 context to a buffer.
+
+ Saves the current floating point/SSE/SSE2 state to the buffer specified by
+ Buffer. Buffer must be aligned on a 16-byte boundary. This function is only
+ available on IA-32 and X64.
+
+ @param Buffer Pointer to a buffer to save the floating point/SSE/SSE2 context.
+
+**/
+VOID
+EFIAPI
+InternalX86FxSave (
+ OUT IA32_FX_BUFFER *Buffer
+ )
+{
+ __asm__ __volatile__ (
+ "fxsave %0"
+ :
+ : "m" (*Buffer) // %0
+ );
+}
+
+
+/**
+ Restores the current floating point/SSE/SSE2 context from a buffer.
+
+ Restores the current floating point/SSE/SSE2 state from the buffer specified
+ by Buffer. Buffer must be aligned on a 16-byte boundary. This function is
+ only available on IA-32 and X64.
+
+  @param Buffer Pointer to a buffer containing the floating point/SSE/SSE2
+                context to restore.
+
+**/
+VOID
+EFIAPI
+InternalX86FxRestore (
+ IN CONST IA32_FX_BUFFER *Buffer
+ )
+{
+ __asm__ __volatile__ (
+ "fxrstor %0"
+ :
+ : "m" (*Buffer) // %0
+ );
+}
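+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): the
+// public AsmFxSave()/AsmFxRestore() wrappers call these helpers and require a
+// 16-byte aligned IA32_FX_BUFFER, since FXSAVE/FXRSTOR fault on misaligned
+// operands.
+//
+//   IA32_FX_BUFFER  FxBuffer;    // caller must guarantee 16-byte alignment
+//
+//   AsmFxSave (&FxBuffer);
+//   // ... code that may modify the FP/SSE state ...
+//   AsmFxRestore (&FxBuffer);
+//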
+
+
+/**
+ Reads the current value of 64-bit MMX Register #0 (MM0).
+
+ Reads and returns the current value of MM0. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM0.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm0 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm0, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #1 (MM1).
+
+ Reads and returns the current value of MM1. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM1.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm1 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm1, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #2 (MM2).
+
+ Reads and returns the current value of MM2. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM2.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm2 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm2, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #3 (MM3).
+
+ Reads and returns the current value of MM3. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM3.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm3 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm3, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #4 (MM4).
+
+ Reads and returns the current value of MM4. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM4.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm4 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm4, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #5 (MM5).
+
+ Reads and returns the current value of MM5. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM5.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm5 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm5, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #6 (MM6).
+
+ Reads and returns the current value of MM6. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM6.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm6 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm6, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Reads the current value of 64-bit MMX Register #7 (MM7).
+
+ Reads and returns the current value of MM7. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of MM7.
+
+**/
+UINT64
+EFIAPI
+AsmReadMm7 (
+ VOID
+ )
+{
+ UINT64 Data;
+
+ __asm__ __volatile__ (
+ "movd %%mm7, %0 \n\t"
+ : "=r" (Data) // %0
+ );
+
+ return Data;
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #0 (MM0).
+
+ Writes the current value of MM0. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM0.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm0 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm0" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #1 (MM1).
+
+ Writes the current value of MM1. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM1.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm1 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm1" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #2 (MM2).
+
+ Writes the current value of MM2. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM2.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm2 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm2" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #3 (MM3).
+
+ Writes the current value of MM3. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM3.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm3 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm3" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #4 (MM4).
+
+ Writes the current value of MM4. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM4.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm4 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm4" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #5 (MM5).
+
+ Writes the current value of MM5. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM5.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm5 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm5" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #6 (MM6).
+
+ Writes the current value of MM6. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM6.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm6 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm6" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Writes the current value of 64-bit MMX Register #7 (MM7).
+
+ Writes the current value of MM7. This function is only available on IA32 and
+ X64.
+
+ @param Value The 64-bit value to write to MM7.
+
+**/
+VOID
+EFIAPI
+AsmWriteMm7 (
+ IN UINT64 Value
+ )
+{
+ __asm__ __volatile__ (
+ "movd %0, %%mm7" // %0
+ :
+ : "m" (Value)
+ );
+}
+
+
+/**
+ Reads the current value of Time Stamp Counter (TSC).
+
+ Reads and returns the current value of TSC. This function is only available
+ on IA-32 and X64.
+
+ @return The current value of TSC
+
+**/
+UINT64
+EFIAPI
+AsmReadTsc (
+ VOID
+ )
+{
+ UINT32 LowData;
+ UINT32 HiData;
+
+ __asm__ __volatile__ (
+ "rdtsc"
+ : "=a" (LowData),
+ "=d" (HiData)
+ );
+
+ return (((UINT64)HiData) << 32) | LowData;
+}
+
+
+/**
+ Reads the current value of a Performance Counter (PMC).
+
+ Reads and returns the current value of performance counter specified by
+ Index. This function is only available on IA-32 and X64.
+
+ @param Index The 32-bit Performance Counter index to read.
+
+ @return The value of the PMC specified by Index.
+
+**/
+UINT64
+EFIAPI
+AsmReadPmc (
+ IN UINT32 Index
+ )
+{
+ UINT32 LowData;
+ UINT32 HiData;
+
+ __asm__ __volatile__ (
+ "rdpmc"
+ : "=a" (LowData),
+ "=d" (HiData)
+ : "c" (Index)
+ );
+
+ return (((UINT64)HiData) << 32) | LowData;
+}
+
+
+/**
+ Sets up a monitor buffer that is used by AsmMwait().
+
+ Executes a MONITOR instruction with the register state specified by Eax, Ecx
+ and Edx. Returns Eax. This function is only available on IA-32 and X64.
+
+ @param Eax The value to load into EAX or RAX before executing the MONITOR
+ instruction.
+ @param Ecx The value to load into ECX or RCX before executing the MONITOR
+ instruction.
+ @param Edx The value to load into EDX or RDX before executing the MONITOR
+ instruction.
+
+ @return Eax
+
+**/
+UINTN
+EFIAPI
+AsmMonitor (
+ IN UINTN Eax,
+ IN UINTN Ecx,
+ IN UINTN Edx
+ )
+{
+ __asm__ __volatile__ (
+ "monitor"
+ :
+ : "a" (Eax),
+ "c" (Ecx),
+ "d" (Edx)
+ );
+
+ return Eax;
+}
+
+
+/**
+ Executes an MWAIT instruction.
+
+ Executes an MWAIT instruction with the register state specified by Eax and
+ Ecx. Returns Eax. This function is only available on IA-32 and X64.
+
+  @param Eax The value to load into EAX or RAX before executing the MWAIT
+              instruction.
+  @param Ecx The value to load into ECX or RCX before executing the MWAIT
+              instruction.
+
+ @return Eax
+
+**/
+UINTN
+EFIAPI
+AsmMwait (
+ IN UINTN Eax,
+ IN UINTN Ecx
+ )
+{
+ __asm__ __volatile__ (
+ "mwait"
+ :
+ : "a" (Eax),
+ "c" (Ecx)
+ );
+
+ return Eax;
+}
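+
+//
+// Illustrative usage (editor's sketch, not part of the original patch): a
+// MONITOR/MWAIT wait loop on a hypothetical volatile flag. EAX/RAX carries
+// the monitored linear address for MONITOR; the ECX/EDX hints are zero here.
+//
+//   while (*Flag == 0) {
+//     AsmMonitor ((UINTN)Flag, 0, 0);
+//     if (*Flag == 0) {
+//       AsmMwait (0, 0);
+//     }
+//   }
+//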
+
+
+/**
+ Executes a WBINVD instruction.
+
+ Executes a WBINVD instruction. This function is only available on IA-32 and
+ X64.
+
+**/
+VOID
+EFIAPI
+AsmWbinvd (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("wbinvd":::"memory");
+}
+
+
+/**
+  Executes an INVD instruction.
+
+  Executes an INVD instruction. This function is only available on IA-32 and
+ X64.
+
+**/
+VOID
+EFIAPI
+AsmInvd (
+ VOID
+ )
+{
+ __asm__ __volatile__ ("invd":::"memory");
+
+}
+
+
+/**
+ Flushes a cache line from all the instruction and data caches within the
+ coherency domain of the CPU.
+
+  Flushes the cache line specified by LinearAddress, and returns LinearAddress.
+ This function is only available on IA-32 and X64.
+
+ @param LinearAddress The address of the cache line to flush. If the CPU is
+ in a physical addressing mode, then LinearAddress is a
+ physical address. If the CPU is in a virtual
+ addressing mode, then LinearAddress is a virtual
+ address.
+
+ @return LinearAddress
+**/
+VOID *
+EFIAPI
+AsmFlushCacheLine (
+ IN VOID *LinearAddress
+ )
+{
+ __asm__ __volatile__ (
+ "clflush (%0)"
+ :
+ : "r" (LinearAddress)
+ : "memory"
+ );
+
+ return LinearAddress;
+}
+
+