Diffstat (limited to 'Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64')
11 files changed, 4800 insertions, 0 deletions
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/Exception.c b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/Exception.c
new file mode 100644
index 0000000000..3860587b0f
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/Exception.c
@@ -0,0 +1,382 @@
+/** @file
+ EM64T Exception Handler.
+
+ Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Library/BaseMemoryLib.h>
+#include <Library/ReportStatusCodeLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Private/Library/CpuCommonLib.h>
+#include "CpuInitDxe.h"
+#include "MpCommon.h"
+#include "Exception.h"
+
+typedef
+VOID
+(*EFI_INSTALL_EXCEPTION)(
+ IN UINT32 InterruptType,
+ IN VOID *SystemContext
+ );
+
+typedef struct {
+ UINT32 ErrorMessage;
+ UINT8 Interrupt;
+} EFI_EXCEPTION_HANDLER;
+
+//
+// Error code flag indicating whether or not an error code will be
+// pushed on the stack if an exception occurs.
+//
+// 1 means an error code will be pushed, otherwise 0
+//
+// bit 0 - exception 0
+// bit 1 - exception 1
+// etc.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINT32 mErrorCodeFlag = 0x00027d00;
+
+//
+// Local Table
+//
+GLOBAL_REMOVE_IF_UNREFERENCED EFI_EXCEPTION_HANDLER mExceptionTable[] = {
+ {
+ EFI_SW_EC_IA32_DIVIDE_ERROR,
+ INTERRUPT_HANDLER_DIVIDE_ZERO
+ },
+ {
+ EFI_SW_EC_IA32_DEBUG,
+ INTERRUPT_HANDLER_DEBUG
+ },
+ {
+ EFI_SW_EC_IA32_NMI,
+ INTERRUPT_HANDLER_NMI
+ },
+ {
+ EFI_SW_EC_IA32_BREAKPOINT,
+ INTERRUPT_HANDLER_BREAKPOINT
+ },
+ {
+ EFI_SW_EC_IA32_OVERFLOW,
+ INTERRUPT_HANDLER_OVERFLOW
+ },
+ {
+ EFI_SW_EC_IA32_BOUND,
+ INTERRUPT_HANDLER_BOUND
+ },
+ {
+ EFI_SW_EC_IA32_INVALID_OPCODE,
+ INTERRUPT_HANDLER_INVALID_OPCODE
+ },
+ //
+  // Interrupts 7, 9, and 15 are not defined in the Debug Support Protocol, so no status codes are reported for them.
+ //
+ {
+ EFI_SW_EC_IA32_DOUBLE_FAULT,
+ INTERRUPT_HANDLER_DOUBLE_FAULT
+ },
+ {
+ EFI_SW_EC_IA32_INVALID_TSS,
+ INTERRUPT_HANDLER_INVALID_TSS
+ },
+ {
+ EFI_SW_EC_IA32_SEG_NOT_PRESENT,
+ INTERRUPT_HANDLER_SEGMENT_NOT_PRESENT
+ },
+ {
+ EFI_SW_EC_IA32_STACK_FAULT,
+ INTERRUPT_HANDLER_STACK_SEGMENT_FAULT
+ },
+ {
+ EFI_SW_EC_IA32_GP_FAULT,
+ INTERRUPT_HANDLER_GP_FAULT
+ },
+ {
+ EFI_SW_EC_IA32_PAGE_FAULT,
+ INTERRUPT_HANDLER_PAGE_FAULT
+ },
+ {
+ EFI_SW_EC_IA32_FP_ERROR,
+ INTERRUPT_HANDLER_MATH_FAULT
+ },
+ {
+ EFI_SW_EC_IA32_ALIGNMENT_CHECK,
+ INTERRUPT_HANDLER_ALIGNMENT_FAULT
+ },
+ {
+ EFI_SW_EC_IA32_MACHINE_CHECK,
+ INTERRUPT_HANDLER_MACHINE_CHECK
+ },
+ {
+ EFI_SW_EC_IA32_SIMD,
+ INTERRUPT_HANDLER_STREAMING_SIMD
+ }
+};
+
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mExceptionNumber = sizeof (mExceptionTable) / sizeof (EFI_EXCEPTION_HANDLER);
+
+GLOBAL_REMOVE_IF_UNREFERENCED CPU_STATUS_CODE_TEMPLATE mStatusCodeData = {
+ {
+ sizeof (EFI_STATUS_CODE_DATA),
+ sizeof (EFI_SYSTEM_CONTEXT_X64),
+ { 0 }
+ },
+ {
+ { 0 }
+ }
+};
+
+GLOBAL_REMOVE_IF_UNREFERENCED UINT8 mExceptionLock = 0;
+
+/**
+  Report a status code for the given exception.
+
+  @param[in] InterruptType  Exception (interrupt) type.
+  @param[in] SystemContext  EFI_SYSTEM_CONTEXT of the interrupted processor.
+
+  @retval EFI_SUCCESS       The status code was reported.
+
+**/
+EFI_STATUS
+ReportData (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINT32 ErrorMessage;
+ UINT32 Index;
+
+ CopyMem (
+ &mStatusCodeData.SystemContext.SystemContextX64,
+ SystemContext.SystemContextX64,
+ sizeof (EFI_SYSTEM_CONTEXT_X64)
+ );
+
+ ErrorMessage = EFI_SOFTWARE_DXE_BS_DRIVER;
+ for (Index = 0; Index < mExceptionNumber; Index++) {
+ if (mExceptionTable[Index].Interrupt == InterruptType) {
+ ErrorMessage |= mExceptionTable[Index].ErrorMessage;
+ break;
+ }
+ }
+
+ ReportStatusCode (
+ (EFI_ERROR_CODE | EFI_ERROR_UNRECOVERED),
+ EFI_SOFTWARE_UNSPECIFIED | ErrorMessage
+ );
+
+ return EFI_SUCCESS;
+}
+
+
+/**
+ Common exception handler
+
+ @param[in] InterruptType Exception type
+ @param[in] SystemContext EFI_SYSTEM_CONTEXT
+
+**/
+VOID
+EFIAPI
+CommonExceptionHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ AsmAcquireMPLock (&mExceptionLock);
+
+ DEBUG (
+ (DEBUG_ERROR,
+ "!!!! X64 Exception Type - %016lx CPU Apic ID - %08x!!!!\n",
+ InterruptType,
+ GetCpuApicId ())
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "RIP - %016lx, CS - %016lx, RFLAGS - %016lx\n",
+ SystemContext.SystemContextX64->Rip,
+ SystemContext.SystemContextX64->Cs,
+ SystemContext.SystemContextX64->Rflags)
+ );
+ if (mErrorCodeFlag & (1 << InterruptType)) {
+ DEBUG (
+ (DEBUG_ERROR,
+ "ExceptionData - %016lx\n",
+ SystemContext.SystemContextX64->ExceptionData)
+ );
+ }
+
+ DEBUG (
+ (DEBUG_ERROR,
+ "RAX - %016lx, RCX - %016lx, RDX - %016lx\n",
+ SystemContext.SystemContextX64->Rax,
+ SystemContext.SystemContextX64->Rcx,
+ SystemContext.SystemContextX64->Rdx)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "RBX - %016lx, RSP - %016lx, RBP - %016lx\n",
+ SystemContext.SystemContextX64->Rbx,
+ SystemContext.SystemContextX64->Rsp,
+ SystemContext.SystemContextX64->Rbp)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "RSI - %016lx, RDI - %016lx\n",
+ SystemContext.SystemContextX64->Rsi,
+ SystemContext.SystemContextX64->Rdi)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "R8 - %016lx, R9 - %016lx, R10 - %016lx\n",
+ SystemContext.SystemContextX64->R8,
+ SystemContext.SystemContextX64->R9,
+ SystemContext.SystemContextX64->R10)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "R11 - %016lx, R12 - %016lx, R13 - %016lx\n",
+ SystemContext.SystemContextX64->R11,
+ SystemContext.SystemContextX64->R12,
+ SystemContext.SystemContextX64->R13)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "R14 - %016lx, R15 - %016lx\n",
+ SystemContext.SystemContextX64->R14,
+ SystemContext.SystemContextX64->R15)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "DS - %016lx, ES - %016lx, FS - %016lx\n",
+ SystemContext.SystemContextX64->Ds,
+ SystemContext.SystemContextX64->Es,
+ SystemContext.SystemContextX64->Fs)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "GS - %016lx, SS - %016lx\n",
+ SystemContext.SystemContextX64->Gs,
+ SystemContext.SystemContextX64->Ss)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "GDTR - %016lx %016lx, LDTR - %016lx\n",
+ SystemContext.SystemContextX64->Gdtr[0],
+ SystemContext.SystemContextX64->Gdtr[1],
+ SystemContext.SystemContextX64->Ldtr)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "IDTR - %016lx %016lx, TR - %016lx\n",
+ SystemContext.SystemContextX64->Idtr[0],
+ SystemContext.SystemContextX64->Idtr[1],
+ SystemContext.SystemContextX64->Tr)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "CR0 - %016lx, CR2 - %016lx, CR3 - %016lx\n",
+ SystemContext.SystemContextX64->Cr0,
+ SystemContext.SystemContextX64->Cr2,
+ SystemContext.SystemContextX64->Cr3)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "CR4 - %016lx, CR8 - %016lx\n",
+ SystemContext.SystemContextX64->Cr4,
+ SystemContext.SystemContextX64->Cr8)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "DR0 - %016lx, DR1 - %016lx, DR2 - %016lx\n",
+ SystemContext.SystemContextX64->Dr0,
+ SystemContext.SystemContextX64->Dr1,
+ SystemContext.SystemContextX64->Dr2)
+ );
+ DEBUG (
+ (DEBUG_ERROR,
+ "DR3 - %016lx, DR6 - %016lx, DR7 - %016lx\n",
+ SystemContext.SystemContextX64->Dr3,
+ SystemContext.SystemContextX64->Dr6,
+ SystemContext.SystemContextX64->Dr7)
+ );
+
+ //
+ // Report Status Code
+ //
+ ReportData (InterruptType, SystemContext);
+ AsmReleaseMPLock (&mExceptionLock);
+
+ //
+  // Hang in CpuDeadLoop() so that the compiler does not optimize out the
+  // following return. This allows us to return from the handler if a
+  // debugger is attached.
+ //
+ CpuDeadLoop ();
+
+ return;
+}
+
+
+/**
+  Install the EM64T exception handlers.
+
+  The current operation (which is likely to change) first unregisters the
+  pertinent exception handlers (0-7, 10-14, 16-19), except for Int8, which the
+  timer is currently using (or soon will be). It then registers
+  CommonExceptionHandler() for each exception in mExceptionTable.
+
+  On an exception, the handler reports a status code via ReportStatusCode().
+  The status codes currently start at 0x200 for IA-32 processors; see the
+  Status Code Specification for details. That specification uses the
+  enumeration from the EFI 1.1 Debug Support Protocol.
+
+ @param[in] CpuProtocol Instance of CPU Arch Protocol
+
+ @retval EFI_SUCCESS This function always return success after registering handlers.
+
+**/
+EFI_STATUS
+InitializeException (
+ IN EFI_CPU_ARCH_PROTOCOL *CpuProtocol
+ )
+{
+ EFI_STATUS Status;
+ UINTN Index;
+
+ mStatusCodeData.Header.Type = gCpuStatusCodeDataTypeExceptionHandlerGuid;
+
+ CpuProtocol->DisableInterrupt (CpuProtocol);
+
+ for (Index = 0; Index < mExceptionNumber; Index++) {
+
+ Status = CpuProtocol->RegisterInterruptHandler (
+ CpuProtocol,
+ mExceptionTable[Index].Interrupt,
+ NULL
+ );
+
+ //
+ // Add in our handler
+ //
+ Status = CpuProtocol->RegisterInterruptHandler (
+ CpuProtocol,
+ mExceptionTable[Index].Interrupt,
+ CommonExceptionHandler
+ );
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ return EFI_SUCCESS;
+}
+
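For reference, a standalone C sketch (not part of the patch above) that decodes the mErrorCodeFlag value used in Exception.c. The set bits (8, 10-14 and 17) correspond to the x86 exception vectors that push an error code, which is why CommonExceptionHandler() only prints ExceptionData for those vectors; the loop mirrors the `mErrorCodeFlag & (1 << InterruptType)` test in the handler.

  /* Standalone sketch, not part of the patch: decode mErrorCodeFlag to list
     the exception vectors that are expected to push an error code.          */
  #include <stdio.h>

  int main (void)
  {
    const unsigned int ErrorCodeFlag = 0x00027d00;   /* value used in Exception.c */
    unsigned int       Vector;

    /* Expected output: vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS),
       13 (#GP), 14 (#PF) and 17 (#AC). */
    for (Vector = 0; Vector < 32; Vector++) {
      if (ErrorCodeFlag & (1u << Vector)) {
        printf ("Vector %u pushes an error code\n", Vector);
      }
    }
    return 0;
  }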
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MemoryOperation.c b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MemoryOperation.c
new file mode 100644
index 0000000000..2e665ebf53
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MemoryOperation.c
@@ -0,0 +1,800 @@
+/** @file
+  Memory Operation Functions for the x64 Architecture.
+
+ Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include "CpuInitDxe.h"
+#include "MpCommon.h"
+#include "VirtualMemory.h"
+
+VOID
+InitializeExternalVectorTablePtr (
+ EFI_CPU_INTERRUPT_HANDLER *VectorTable
+ );
+
+extern EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[];
+extern EFI_PHYSICAL_ADDRESS mBackupBuffer;
+
+#if defined(__GNUC__)
+ extern UINT64 gCpuInitFloatPointUnit;
+#endif
+
+GLOBAL_REMOVE_IF_UNREFERENCED UINT8 *mPageStore = NULL;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mPageStoreSize = 16;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mPageStoreIndex = 0;
+
+GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mValidMtrrAddressMask;
+GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mValidMtrrBitsMask;
+
+#if defined (__GNUC__)
+#define ALIGN_16BYTE_BOUNDRY __attribute__ ((aligned (16)))
+#else
+#define ALIGN_16BYTE_BOUNDRY __declspec (align (16))
+#endif
+
+#pragma pack(1)
+typedef struct {
+ UINT16 LimitLow;
+ UINT16 BaseLow;
+ UINT8 BaseMiddle;
+ UINT8 Attributes1;
+ UINT8 Attributes2;
+ UINT8 BaseHigh;
+} SEGMENT_DESCRIPTOR_x64;
+
+typedef struct {
+ UINT16 Limit;
+ UINTN Base;
+} PSEUDO_DESCRIPTOR_x64;
+
+#pragma pack()
+
+GLOBAL_REMOVE_IF_UNREFERENCED ALIGN_16BYTE_BOUNDRY SEGMENT_DESCRIPTOR_x64 gGdt[] = {
+ { /// NULL Selector: selector[0]
+ 0, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+ 0, ///
+ 0, /// type & limit 19:16
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// Linear Selector: selector[8]
+ 0xffff, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+ 0x93, /// present, ring 0, data, expand-up writable
+ 0xcf, /// type & limit 19:16
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// Linear code Selector: selector[10]
+ 0xffff, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+    0x9b,   /// present, ring 0, code, execute/read
+ 0xcf, /// type & limit 19:16
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// Compatibility mode data Selector: selector[18]
+ 0xffff, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+    0x93,   /// present, ring 0, data, expand-up writable
+    0xcf,   /// type & limit 19:16
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// Compatibility code Selector: selector[20]
+ 0xffff, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+    0x9b,   /// present, ring 0, code, execute/read
+    0xcf,   /// type & limit 19:16
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// Spare3 Selector: selector[28]
+ 0, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+ 0, /// type & limit 19:16
+ 0, /// base 31:24
+ 0,
+ ///
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// 64-bit data Selector:selector[30]
+ 0xffff, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+    0x93,   /// present, ring 0, data, expand-up writable
+    0xcf,   /// type & limit 19:16
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// 64-bit code Selector: selector[38]
+ 0xffff, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+    0x9b,   /// present, ring 0, code, execute/read
+    0xaf,   /// type & limit 19:16 (L bit set for 64-bit code)
+ 0, /// base 31:24
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ },
+ { /// Spare3 Selector: selector[40]
+ 0, /// limit 15:0
+ 0, /// base 15:0
+ 0, /// base 23:16
+ 0, /// type & limit 19:16
+ 0, /// base 31:24
+ 0,
+ ///
+ /// 0, /// base 63:32
+ /// 0 /// reserved
+ ///
+ }
+};
+
+GLOBAL_REMOVE_IF_UNREFERENCED ALIGN_16BYTE_BOUNDRY PSEUDO_DESCRIPTOR_x64 gGdtPseudoDescriptor = {
+ sizeof (gGdt) - 1,
+ (UINTN) gGdt
+};
+
+GLOBAL_REMOVE_IF_UNREFERENCED INTERRUPT_GATE_DESCRIPTOR gIdtTable[INTERRUPT_VECTOR_NUMBER] = { { 0, 0, 0, 0, 0, 0 } };
+
+INTERRUPT_GATE_DESCRIPTOR gOrigIdtTable[INTERRUPT_VECTOR_NUMBER] = { { 0, 0, 0, 0, 0, 0 } };
+UINTN mOrigIdtEntryCount = 0;
+
+GLOBAL_REMOVE_IF_UNREFERENCED ALIGN_16BYTE_BOUNDRY PSEUDO_DESCRIPTOR_x64 gLidtPseudoDescriptor = {
+ sizeof (gIdtTable) - 1,
+ (UINTN) gIdtTable
+};
+
+/**
+ Init Global Descriptor table
+
+**/
+VOID
+InitializeSelectors (
+ VOID
+ )
+{
+ AsmWriteGdtr ((IA32_DESCRIPTOR *) &gGdtPseudoDescriptor);
+}
+
+
+/**
+ Generic IDT Vector Handlers for the Host
+
+**/
+VOID
+AsmIdtVector00 (
+ VOID
+ );
+
+
+/**
+ Initialize Interrupt descriptor Tables
+
+**/
+VOID
+InitializeInterruptTables (
+ VOID
+ )
+{
+ UINT16 CodeSegment;
+ INTERRUPT_GATE_DESCRIPTOR *IdtEntry;
+ UINT8 *CurrentHandler;
+ UINT32 Index;
+ IA32_DESCRIPTOR IdtrForBsp;
+
+ CodeSegment = AsmReadCs ();
+
+ IdtEntry = gIdtTable;
+ CurrentHandler = (UINT8 *) (UINTN) AsmIdtVector00;
+ for (Index = 0; Index < INTERRUPT_VECTOR_NUMBER; Index++) {
+ IdtEntry[Index].Offset15To0 = (UINT16) (UINTN) CurrentHandler;
+ IdtEntry[Index].SegmentSelector = CodeSegment;
+ IdtEntry[Index].Attributes = INTERRUPT_GATE_ATTRIBUTE;
+
+    //
+    // INTERRUPT_GATE_ATTRIBUTE (0x8e00): present, DPL 0, 64-bit interrupt gate
+    //
+ IdtEntry[Index].Offset31To16 = (UINT16) ((UINTN) CurrentHandler >> 16);
+ IdtEntry[Index].Offset63To32 = (UINT32) ((UINTN) CurrentHandler >> 32);
+ CurrentHandler += 0x8;
+ }
+
+
+ AsmReadIdtr (&IdtrForBsp);
+
+ //
+ // Size of the IDT currently programmed (number of entries)
+ //
+ mOrigIdtEntryCount = (IdtrForBsp.Limit + 1) / sizeof (INTERRUPT_GATE_DESCRIPTOR);
+
+ //
+ // Update debugger CS with DxeCpu CS.
+ //
+ IdtEntry = (INTERRUPT_GATE_DESCRIPTOR *) IdtrForBsp.Base;
+ for (Index = 0; Index < mOrigIdtEntryCount; Index++) {
+ IdtEntry[Index].SegmentSelector = CodeSegment;
+ }
+ //
+ // Save a copy of the original IDT
+ //
+ CopyMem (&gOrigIdtTable, (VOID *) IdtrForBsp.Base, IdtrForBsp.Limit + 1);
+
+#ifdef SOURCE_DEBUG_ENABLE
+ //
+ // Use the original IDT table.
+ //
+ gLidtPseudoDescriptor.Limit = sizeof (gOrigIdtTable) - 1;
+ gLidtPseudoDescriptor.Base = (UINTN) gOrigIdtTable;
+#endif
+
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &gLidtPseudoDescriptor);
+
+ return;
+}
+
+
+/**
+ Set Interrupt Descriptor Table Handler Address.
+
+ @param[in] Index The Index of the interrupt descriptor table handle.
+
+**/
+VOID
+SetInterruptDescriptorTableHandlerAddress (
+ IN UINTN Index
+ )
+{
+ IA32_DESCRIPTOR IdtrForBsp;
+ INTERRUPT_GATE_DESCRIPTOR *IdtEntry;
+
+ AsmReadIdtr (&IdtrForBsp);
+ IdtEntry = (INTERRUPT_GATE_DESCRIPTOR *) IdtrForBsp.Base;
+
+ //
+ // Plug in CPU Driver version
+ //
+ CopyMem (&IdtEntry[Index], &gIdtTable[Index], sizeof (INTERRUPT_GATE_DESCRIPTOR));
+}
+
+
+/**
+ Restore original Interrupt Descriptor Table Handler Address.
+
+ @param[in] Index The Index of the interrupt descriptor table handle.
+
+**/
+VOID
+RestoreInterruptDescriptorTableHandlerAddress (
+ IN UINTN Index
+ )
+{
+ IA32_DESCRIPTOR IdtrForBsp;
+ INTERRUPT_GATE_DESCRIPTOR *IdtEntry;
+
+ if (Index >= mOrigIdtEntryCount) {
+ return;
+ }
+
+ AsmReadIdtr (&IdtrForBsp);
+ IdtEntry = (INTERRUPT_GATE_DESCRIPTOR *) IdtrForBsp.Base;
+ CopyMem (&IdtEntry[Index], &gOrigIdtTable[Index], sizeof (INTERRUPT_GATE_DESCRIPTOR));
+}
+
+
+/**
+ Initialize cache attributes based on MTRR
+
+**/
+VOID
+InitailizeCacheAttributes (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Page;
+ EFI_CPUID_REGISTER FeatureInfo;
+ EFI_CPUID_REGISTER FunctionInfo;
+ UINT8 PhysicalAddressBits;
+ UINT32 MsrNum;
+ UINT64 TempQword;
+ UINT64 ComplementBits;
+ UINT32 VariableMtrrLimit;
+
+ VariableMtrrLimit = (UINT32) (AsmReadMsr64 (IA32_MTRR_CAP) & B_IA32_MTRR_VARIABLE_SUPPORT);
+
+ //
+ // Allocate 16 pages
+ //
+ Page = (EFI_PHYSICAL_ADDRESS) (UINTN) AllocatePages (mPageStoreSize);
+ ASSERT (Page != 0);
+
+ mPageStore = (UINT8 *) (UINTN) Page;
+
+ ZeroMem (mPageStore, 0x1000 * mPageStoreSize);
+
+ //
+ // Check returned value of Eax for extended CPUID functions
+ //
+ AsmCpuid (
+ CPUID_EXTENDED_FUNCTION,
+ &FunctionInfo.RegEax,
+ &FunctionInfo.RegEbx,
+ &FunctionInfo.RegEcx,
+ &FunctionInfo.RegEdx
+ );
+
+ PhysicalAddressBits = 36;
+
+ //
+ // If CPU supports extended functions, get the Physical Address size by reading EAX[7:0]
+ //
+ if (FunctionInfo.RegEax > CPUID_EXTENDED_FUNCTION) {
+ AsmCpuid (
+ CPUID_VIR_PHY_ADDRESS_SIZE,
+ &FeatureInfo.RegEax,
+ &FeatureInfo.RegEbx,
+ &FeatureInfo.RegEcx,
+ &FeatureInfo.RegEdx
+ );
+ PhysicalAddressBits = (UINT8) FeatureInfo.RegEax;
+ }
+
+ mValidMtrrBitsMask = (((UINT64) 1) << PhysicalAddressBits) - 1;
+ mValidMtrrAddressMask = mValidMtrrBitsMask & 0xfffffffffffff000;
+
+ ComplementBits = mValidMtrrBitsMask & 0xfffffff000000000;
+ if (ComplementBits != 0) {
+ //
+ // Disable cache and clear the corresponding MTRR bits
+ //
+ PreMtrrChange ();
+ for (MsrNum = CACHE_VARIABLE_MTRR_BASE;
+ MsrNum < (CACHE_VARIABLE_MTRR_BASE + VariableMtrrLimit * 2 - 1);
+ MsrNum += 2
+ ) {
+ TempQword = AsmReadMsr64 (MsrNum + 1);
+ if ((TempQword & B_CACHE_MTRR_VALID) != 0) {
+ //
+ // MTRR Physical Mask
+ //
+ TempQword = TempQword | ComplementBits;
+ AsmWriteMsr64 (MsrNum + 1, TempQword);
+ }
+ }
+
+ //
+ // Enable Cache and set the corresponding MTRR bits
+ //
+ PostMtrrChange ();
+ }
+}
+
+
+/**
+  Allocate a zeroed 4 KB page from the pre-allocated page store.
+
+  @retval Pointer to the page buffer, or NULL if the page store is exhausted.
+
+**/
+VOID *
+AllocateZeroedPage (
+ VOID
+ )
+{
+ if (mPageStoreIndex >= mPageStoreSize) {
+ //
+ // We are out of space
+ //
+ return NULL;
+ }
+
+ return (VOID *) (UINTN) &mPageStore[0x1000 * mPageStoreIndex++];
+}
+
+
+/**
+  Convert a 2 MB page mapping into 512 4 KB page table entries.
+
+  @param[in]      PageAddress             Page address to convert.
+  @param[in, out] PageDirectoryToConvert  On input, the 2 MB page directory entry to convert;
+                                          on output, the 4 KB page table entry that maps PageAddress.
+
+**/
+VOID
+Convert2MBPageTo4KPages (
+ IN EFI_PHYSICAL_ADDRESS PageAddress,
+ IN OUT x64_PAGE_TABLE_ENTRY **PageDirectoryToConvert
+ )
+{
+ UINTN Index;
+ EFI_PHYSICAL_ADDRESS WorkingAddress;
+ x64_PAGE_TABLE_ENTRY_4K *PageTableEntry;
+ x64_PAGE_TABLE_ENTRY Attributes;
+
+ //
+ // Save the attributes of the 2MB table
+ //
+ Attributes.Page2Mb.Uint64 = (*PageDirectoryToConvert)->Page2Mb.Uint64;
+
+ //
+ // Convert PageDirectoryEntry2MB into a 4K Page Directory
+ //
+ PageTableEntry = AllocateZeroedPage ();
+ if (PageTableEntry == NULL) {
+ return;
+ }
+ (*PageDirectoryToConvert)->Page2Mb.Uint64 = (UINT64) PageTableEntry;
+ (*PageDirectoryToConvert)->Page2Mb.Bits.ReadWrite = 1;
+ (*PageDirectoryToConvert)->Page2Mb.Bits.Present = 1;
+
+ WorkingAddress = PageAddress;
+ for (Index = 0; Index < 512; Index++, PageTableEntry++, WorkingAddress += 0x1000) {
+ PageTableEntry->Uint64 = (UINT64) WorkingAddress;
+ PageTableEntry->Bits.Present = 1;
+
+ //
+ // Update the new page to have the same attributes as the 2MB page
+ //
+ PageTableEntry->Bits.ReadWrite = Attributes.Common.ReadWrite;
+ PageTableEntry->Bits.CacheDisabled = Attributes.Common.CacheDisabled;
+ PageTableEntry->Bits.WriteThrough = Attributes.Common.WriteThrough;
+
+ if (WorkingAddress == PageAddress) {
+ //
+      // Return the 4 KB page table entry that matches the original PageAddress
+ //
+ *PageDirectoryToConvert = (x64_PAGE_TABLE_ENTRY *) PageTableEntry;
+ }
+ }
+}
+
+
+/**
+  Get current memory mapping information.
+
+  @param[in]  BaseAddress   Base address whose current mapping is requested.
+  @param[out] PageTable     Page table entry that translates this base address.
+  @param[out] Page2MBytes   TRUE if the address is mapped by a 2 MB page.
+
+  @retval EFI_NOT_FOUND     No valid mapping was found.
+  @retval EFI_SUCCESS       The page table entry was found.
+
+**/
+EFI_STATUS
+GetCurrentMapping (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ OUT x64_PAGE_TABLE_ENTRY **PageTable,
+ OUT BOOLEAN *Page2MBytes
+ )
+{
+ UINT64 Cr3;
+ x64_PAGE_MAP_AND_DIRECTORY_POINTER_2MB_4K *PageMapLevel4Entry;
+ x64_PAGE_MAP_AND_DIRECTORY_POINTER_2MB_4K *PageDirectoryPointerEntry;
+ x64_PAGE_TABLE_ENTRY_2M *PageTableEntry2Mb;
+ x64_PAGE_DIRECTORY_ENTRY_4K *PageDirectoryEntry4k;
+ x64_PAGE_TABLE_ENTRY_4K *PageTableEntry4k;
+ UINTN Pml4Index;
+ UINTN PdpIndex;
+ UINTN Pde2MbIndex;
+ UINTN PteIndex;
+
+ Cr3 = AsmReadCr3 ();
+
+ PageMapLevel4Entry = (x64_PAGE_MAP_AND_DIRECTORY_POINTER_2MB_4K *) (Cr3 & 0x000ffffffffff000);
+
+ Pml4Index = (UINTN) RShiftU64 (BaseAddress, 39) & 0x1ff;
+ if (PageMapLevel4Entry[Pml4Index].Bits.Present == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ PageDirectoryPointerEntry = (x64_PAGE_MAP_AND_DIRECTORY_POINTER_2MB_4K *) (PageMapLevel4Entry[Pml4Index].Uint64 & 0x000ffffffffff000);
+ PdpIndex = (UINTN) RShiftU64 (BaseAddress, 30) & 0x1ff;
+ if (PageDirectoryPointerEntry[PdpIndex].Bits.Present == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ PageTableEntry2Mb = (x64_PAGE_TABLE_ENTRY_2M *) (PageDirectoryPointerEntry[PdpIndex].Uint64 & 0x000ffffffffff000);
+ Pde2MbIndex = (UINTN) RShiftU64 (BaseAddress, 21) & 0x1ff;
+ if (PageTableEntry2Mb[Pde2MbIndex].Bits.Present == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ if (PageTableEntry2Mb[Pde2MbIndex].Bits.MustBe1 == 1) {
+ //
+ // We found a 2MByte page so lets return it
+ //
+ *Page2MBytes = TRUE;
+ *PageTable = (x64_PAGE_TABLE_ENTRY *) &PageTableEntry2Mb[Pde2MbIndex].Uint64;
+ return EFI_SUCCESS;
+ }
+
+ //
+ // 4K page so keep walking
+ //
+ PageDirectoryEntry4k = (x64_PAGE_DIRECTORY_ENTRY_4K *) &PageTableEntry2Mb[Pde2MbIndex].Uint64;
+
+  PageTableEntry4k = (x64_PAGE_TABLE_ENTRY_4K *) (PageDirectoryEntry4k->Uint64 & 0x000ffffffffff000);
+ PteIndex = (UINTN) RShiftU64 (BaseAddress, 12) & 0x1ff;
+ if (PageTableEntry4k[PteIndex].Bits.Present == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ *Page2MBytes = FALSE;
+ *PageTable = (x64_PAGE_TABLE_ENTRY *) &PageTableEntry4k[PteIndex];
+
+ return EFI_SUCCESS;
+}
+
+
+/**
+ Prepare memory for essential system tables.
+
+ @retval EFI_SUCCESS Memory successfully prepared.
+
+**/
+EFI_STATUS
+PrepareMemory (
+ VOID
+ )
+{
+ //
+  // Allocate space for converting 2 MB page tables to 4 KB page tables.
+  // This cannot be done at call time because the TPL level would not
+  // be correct.
+ //
+ InitailizeCacheAttributes ();
+
+ InitializeExternalVectorTablePtr (mExternalVectorTable);
+
+ //
+ // Initialize the Interrupt Descriptor Table
+ //
+ InitializeInterruptTables ();
+
+ return EFI_SUCCESS;
+}
+
+
+/**
+ Prepare Wakeup Buffer and stack for APs.
+
+ @param[out] WakeUpBuffer Pointer to the address of wakeup buffer for output.
+ @param[out] StackAddressStart Pointer to the stack address of APs for output.
+ @param[in] MaximumCPUsForThisSystem Maximum CPUs in this system.
+
+ @retval EFI_SUCCESS Memory successfully prepared for APs.
+ @retval Other Error occurred while allocating memory.
+
+**/
+EFI_STATUS
+PrepareMemoryForAPs (
+ OUT EFI_PHYSICAL_ADDRESS *WakeUpBuffer,
+ OUT VOID **StackAddressStart,
+ IN UINTN MaximumCPUsForThisSystem
+ )
+{
+ EFI_STATUS Status;
+ MP_ASSEMBLY_ADDRESS_MAP AddressMap;
+
+ //
+  // Release all APs with a lock and wait for them to retire to the rendezvous procedure.
+  // A page (4 KB) of memory is needed temporarily for the AP wakeup (broadcast) code.
+ //
+ Status = AllocateWakeUpBuffer (WakeUpBuffer);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ //
+ // Claim memory for AP stack
+ //
+ Status = AllocateReservedMemoryBelow4G (
+ MaximumCPUsForThisSystem * STACK_SIZE_PER_PROC,
+ StackAddressStart
+ );
+
+ if (EFI_ERROR (Status)) {
+ FreePages (WakeUpBuffer, 1);
+ return Status;
+ }
+
+#if defined(__GNUC__)
+ gCpuInitFloatPointUnit = (UINT64) CpuInitFloatPointUnit;
+#endif
+
+ AsmGetAddressMap (&AddressMap);
+ CopyMem ((VOID *) (UINTN) *WakeUpBuffer, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
+ *(UINT32 *) (UINTN) (*WakeUpBuffer + AddressMap.FlatJumpOffset + 3) = (UINT32) (*WakeUpBuffer + AddressMap.PModeEntryOffset);
+ *(UINT32 *) (UINTN) (*WakeUpBuffer + AddressMap.LongJumpOffset + 2) = (UINT32) (*WakeUpBuffer + AddressMap.LModeEntryOffset);
+
+ return EFI_SUCCESS;
+}
+
+
+/**
+ Prepare exchange information for APs.
+
+ @param[out] ExchangeInfo Pointer to the exchange info buffer for output.
+ @param[in] StackAddressStart Start address of APs' stacks.
+ @param[in] ApFunction Address of function assigned to AP.
+ @param[in] WakeUpBuffer Pointer to the address of wakeup buffer.
+
+ @retval EFI_SUCCESS Exchange Info successfully prepared for APs.
+
+**/
+EFI_STATUS
+PrepareExchangeInfo (
+ OUT MP_CPU_EXCHANGE_INFO *ExchangeInfo,
+ IN VOID *StackAddressStart,
+ IN VOID *ApFunction,
+ IN EFI_PHYSICAL_ADDRESS WakeUpBuffer
+ )
+{
+ ZeroMem ((VOID *) ExchangeInfo, EFI_PAGE_SIZE - MP_CPU_EXCHANGE_INFO_OFFSET);
+
+ ExchangeInfo->Lock = VacantFlag;
+ ExchangeInfo->StackStart = StackAddressStart;
+ ExchangeInfo->StackSize = STACK_SIZE_PER_PROC;
+ ExchangeInfo->ApFunction = ApFunction;
+
+ CopyMem (
+ (VOID *) (UINTN) &ExchangeInfo->GdtrProfile,
+ (VOID *) (UINTN) mAcpiCpuData->GdtrProfile,
+ sizeof (IA32_DESCRIPTOR)
+ );
+ CopyMem (
+ (VOID *) (UINTN) &ExchangeInfo->IdtrProfile,
+ (VOID *) (UINTN) mAcpiCpuData->IdtrProfile,
+ sizeof (IA32_DESCRIPTOR)
+ );
+
+ ExchangeInfo->BufferStart = (UINT32) WakeUpBuffer;
+ ExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());
+ ExchangeInfo->InitFlag = 1;
+
+ return EFI_SUCCESS;
+}
+
+
+/**
+ Dynamically write the far jump destination in APs' wakeup buffer,
+ in order to refresh APs' CS registers for mode switching.
+
+**/
+VOID
+RedirectFarJump (
+ VOID
+ )
+{
+ MP_ASSEMBLY_ADDRESS_MAP AddressMap;
+
+ AsmGetAddressMap (&AddressMap);
+ *(UINT32 *) (UINTN) (mAcpiCpuData->WakeUpBuffer + AddressMap.FlatJumpOffset + 3) = (UINT32) (mAcpiCpuData->WakeUpBuffer + AddressMap.PModeEntryOffset);
+ *(UINT32 *) (UINTN) (mAcpiCpuData->WakeUpBuffer + AddressMap.LongJumpOffset + 2) = (UINT32) (mAcpiCpuData->WakeUpBuffer + AddressMap.LModeEntryOffset);
+
+ return;
+}
+
+
+/**
+ Prepare GDTR and IDTR for AP
+
+ @param[out] Gdtr The GDTR profile
+ @param[out] Idtr The IDTR profile
+
+  @retval EFI_SUCCESS           GDTR and IDTR have been prepared for the AP.
+  @retval EFI_OUT_OF_RESOURCES  Memory allocation for the MCE handler failed.
+  @retval Other                 Status returned by a sub-routine.
+
+**/
+EFI_STATUS
+PrepareGdtIdtForAP (
+ OUT IA32_DESCRIPTOR *Gdtr,
+ OUT IA32_DESCRIPTOR *Idtr
+ )
+{
+ INTERRUPT_GATE_DESCRIPTOR *IdtForAp;
+ SEGMENT_DESCRIPTOR *GdtForAp;
+ IA32_DESCRIPTOR GdtrForBsp;
+ IA32_DESCRIPTOR IdtrForBsp;
+ UINT16 *MceHandler;
+ EFI_STATUS Status;
+
+ //
+ // Get Global Descriptor Table Register(GDTR) descriptor
+ //
+ AsmReadGdtr (&GdtrForBsp);
+
+ //
+ // Get Interrupt Descriptor Table Register(IDTR) descriptor
+ //
+ AsmReadIdtr (&IdtrForBsp);
+
+ //
+ // Allocate reserved memory for IDT
+ //
+ Status = AllocateAlignedReservedMemory (
+ IdtrForBsp.Limit + 1,
+ 8,
+ (VOID **) &IdtForAp
+ );
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ //
+ // Allocate reserved memory for GDT
+ //
+ Status = AllocateAlignedReservedMemory (
+ GdtrForBsp.Limit + 1,
+ 8,
+ (VOID **) &GdtForAp
+ );
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ MceHandler = AllocateReservedPool (SIZE_OF_MCE_HANDLER);
+ if (MceHandler == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ //
+  // MceHandler content: iretq (REX.W prefix 0x48 followed by the iret opcode 0xcf)
+ //
+ *MceHandler = 0xCF48;
+
+ CopyMem (GdtForAp, (VOID *) GdtrForBsp.Base, GdtrForBsp.Limit + 1);
+ CopyMem (IdtForAp, (VOID *) IdtrForBsp.Base, IdtrForBsp.Limit + 1);
+
+ IdtForAp[INTERRUPT_HANDLER_MACHINE_CHECK].Offset15To0 = (UINT16) (UINTN) MceHandler;
+ IdtForAp[INTERRUPT_HANDLER_MACHINE_CHECK].Offset31To16 = (UINT16) ((UINTN) MceHandler >> 16);
+ IdtForAp[INTERRUPT_HANDLER_MACHINE_CHECK].Offset63To32 = (UINT32) ((UINTN) MceHandler >> 32);
+
+ //
+ // Create Gdtr, IDTR profile
+ //
+ Gdtr->Base = (UINTN) GdtForAp;
+ Gdtr->Limit = GdtrForBsp.Limit;
+
+ Idtr->Base = (UINTN) IdtForAp;
+ Idtr->Limit = IdtrForBsp.Limit;
+
+ return EFI_SUCCESS;
+}
+
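For reference, a standalone sketch (not part of the patch above) of the address decomposition that GetCurrentMapping() in MemoryOperation.c performs while walking the page tables from CR3: the same shift amounts (39/30/21/12) and 9-bit index masks are used below. The example address is an illustration only.

  /* Standalone sketch, not part of the patch: split an x64 linear address into
     the PML4/PDPT/PD/PT indices used by GetCurrentMapping().                   */
  #include <stdio.h>
  #include <stdint.h>

  int main (void)
  {
    uint64_t BaseAddress = 0x00000000FEE00000ULL;  /* example address only */

    unsigned Pml4Index = (unsigned) ((BaseAddress >> 39) & 0x1ff);
    unsigned PdpIndex  = (unsigned) ((BaseAddress >> 30) & 0x1ff);
    unsigned PdeIndex  = (unsigned) ((BaseAddress >> 21) & 0x1ff);
    unsigned PteIndex  = (unsigned) ((BaseAddress >> 12) & 0x1ff);

    printf ("PML4=%u PDPT=%u PD=%u PT=%u\n", Pml4Index, PdpIndex, PdeIndex, PteIndex);
    return 0;
  }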
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpCpu.c b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpCpu.c
new file mode 100644
index 0000000000..fd09ba1b85
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpCpu.c
@@ -0,0 +1,80 @@
+/** @file
+ MP Support functions.
+
+ Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <MpService.h>
+
+GLOBAL_REMOVE_IF_UNREFERENCED ACPI_CPU_DATA *mAcpiCpuData;
+GLOBAL_REMOVE_IF_UNREFERENCED MP_SYSTEM_DATA *mMPSystemData;
+
+//
+// Function declarations
+//
+/**
+ Initializes MP support in the system.
+
+ @retval EFI_SUCCESS Multiple processors are initialized successfully.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources (such as out of memory).
+
+**/
+EFI_STATUS
+InitializeMpSupport (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ MP_CPU_RESERVED_DATA *MpCpuReservedData;
+
+ MpCpuReservedData = NULL;
+
+ //
+ // Allocate memory for MP CPU related data below 4G
+ //
+ Status = AllocateReservedMemoryBelow4G (
+ sizeof (MP_CPU_RESERVED_DATA),
+ (VOID **) &MpCpuReservedData
+ );
+
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ if (MpCpuReservedData != NULL) {
+ ZeroMem (MpCpuReservedData, sizeof (MP_CPU_RESERVED_DATA));
+ } else {
+ ASSERT (MpCpuReservedData != NULL);
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ mMPSystemData = &(MpCpuReservedData->MPSystemData);
+ mAcpiCpuData = &(MpCpuReservedData->AcpiCpuData);
+
+ //
+ // Initialize ACPI_CPU_DATA data
+ //
+ mAcpiCpuData->CpuPrivateData = (EFI_PHYSICAL_ADDRESS) (UINTN) (&(mMPSystemData->S3DataPointer));
+ mAcpiCpuData->S3BootPath = FALSE;
+  mAcpiCpuData->GdtrProfile = (EFI_PHYSICAL_ADDRESS) &(MpCpuReservedData->GdtrProfile);
+  mAcpiCpuData->IdtrProfile = (EFI_PHYSICAL_ADDRESS) &(MpCpuReservedData->IdtrProfile);
+
+ //
+ // Initialize MP services
+ //
+ InitializeMpServices ();
+
+ return EFI_SUCCESS;
+}
+
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpEqu.h b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpEqu.h
new file mode 100644
index 0000000000..98c3b925f1
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpEqu.h
@@ -0,0 +1,38 @@
+/** @file
+ This is the equates file for HT (Hyper-threading) support.
+
+ Copyright (c) 1999 - 2016, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#define VacantFlag 0x00
+#define NotVacantFlag 0xff
+#define BreakToRunApSignal 0x6E755200
+#define MonitorFilterSize 0x40
+#define WakeUpApCounterInit 0
+#define WakeUpApPerHltLoop 1
+#define WakeUpApPerMwaitLoop 2
+#define WakeUpApPerRunLoop 3
+#define WakeUpApPerMwaitLoop32 4
+#define WakeUpApPerRunLoop32 5
+
+#define LockLocation (0x1000 - 0x0400)
+#define StackStartAddressLocation (LockLocation + 0x08)
+#define StackSizeLocation (LockLocation + 0x10)
+#define CProcedureLocation (LockLocation + 0x18)
+#define GdtrLocation (LockLocation + 0x20)
+#define IdtrLocation (LockLocation + 0x2A)
+#define BufferStartLocation (LockLocation + 0x34)
+#define Cr3OffsetLocation (LockLocation + 0x38)
+#define InitFlagLocation (LockLocation + 0x3C)
+#define WakeUpApManner (LockLocation + 0x40)
+#define BistBuffer (LockLocation + 0x44)
+
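The LockLocation-relative offsets above describe the exchange area placed in the last 1 KB of the 4 KB AP wakeup buffer. The driver's real MP_CPU_EXCHANGE_INFO structure is defined elsewhere (not shown in this patch); the packed structure below is only a hypothetical sketch whose field offsets reproduce the equates, with static assertions checking the offset arithmetic.

  /* Hypothetical layout sketch only; the driver's real MP_CPU_EXCHANGE_INFO is
     defined elsewhere. Offsets are relative to LockLocation (0x1000 - 0x0400). */
  #include <stddef.h>
  #include <stdint.h>

  #pragma pack(1)
  typedef struct {
    uint16_t Limit;
    uint64_t Base;
  } DESCRIPTOR_SKETCH;                     /* 10 bytes, like a packed IA32_DESCRIPTOR */

  typedef struct {
    uint64_t          Lock;                /* +0x00  LockLocation                      */
    uint64_t          StackStart;          /* +0x08  StackStartAddressLocation         */
    uint64_t          StackSize;           /* +0x10  StackSizeLocation                 */
    uint64_t          ApFunction;          /* +0x18  CProcedureLocation                */
    DESCRIPTOR_SKETCH GdtrProfile;         /* +0x20  GdtrLocation                      */
    DESCRIPTOR_SKETCH IdtrProfile;         /* +0x2A  IdtrLocation                      */
    uint32_t          BufferStart;         /* +0x34  BufferStartLocation               */
    uint32_t          Cr3;                 /* +0x38  Cr3OffsetLocation                 */
    uint32_t          InitFlag;            /* +0x3C  InitFlagLocation                  */
    uint32_t          WakeUpApManner;      /* +0x40  WakeUpApManner                    */
    uint64_t          BistBuffer[1];       /* +0x44  per-CPU { ValidFlag, Bist } pairs */
  } EXCHANGE_INFO_SKETCH;
  #pragma pack()

  _Static_assert (offsetof (EXCHANGE_INFO_SKETCH, GdtrProfile)    == 0x20, "GdtrLocation");
  _Static_assert (offsetof (EXCHANGE_INFO_SKETCH, IdtrProfile)    == 0x2A, "IdtrLocation");
  _Static_assert (offsetof (EXCHANGE_INFO_SKETCH, WakeUpApManner) == 0x40, "WakeUpApManner");
  _Static_assert (offsetof (EXCHANGE_INFO_SKETCH, BistBuffer)     == 0x44, "BistBuffer");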
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpEqu.inc b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpEqu.inc
new file mode 100644
index 0000000000..af9a6bc45d
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpEqu.inc
@@ -0,0 +1,44 @@
+;; @file
+; Equates for MP initialization.
+;
+; Copyright (c) 2005 - 2016, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+;
+;;
+
+VacantFlag Equ 00h
+NotVacantFlag Equ 0ffh
+BreakToRunApSignal Equ 6E755200h
+
+MonitorFilterSize Equ 40h
+WakeUpApCounterInit Equ 0
+WakeUpApPerHltLoop Equ 1
+WakeUpApPerMwaitLoop Equ 2
+WakeUpApPerRunLoop Equ 3
+WakeUpApPerMwaitLoop32 Equ 4
+WakeUpApPerRunLoop32 Equ 5
+
+LockLocation equ 1000h - 0400h
+StackStartAddressLocation equ LockLocation + 08h
+StackSizeLocation equ LockLocation + 10h
+CProcedureLocation equ LockLocation + 18h
+GdtrLocation equ LockLocation + 20h
+IdtrLocation equ LockLocation + 2Ah
+BufferStartLocation equ LockLocation + 34h
+Cr3OffsetLocation equ LockLocation + 38h
+InitFlagLocation equ LockLocation + 3Ch
+WakeUpApManner equ LockLocation + 40h
+BistBuffer equ LockLocation + 44h
+
+PAUSE32 MACRO
+ DB 0F3h
+ DB 090h
+ ENDM
+
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpFuncs.S b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpFuncs.S
new file mode 100644
index 0000000000..6b1fa1f679
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpFuncs.S
@@ -0,0 +1,610 @@
+## @file
+# This is the assembly code for EM64T MP support.
+#
+# Copyright (c) 1999 - 2016, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+#include "MpEqu.h"
+
+ .text
+ ASM_FUNCTION_REMOVE_IF_UNREFERENCED
+
+//-------------------------------------------------------------------------------------
+//RendezvousFunnelProc procedure follows. All APs execute this procedure. It
+//serializes the AP processors through an init sequence. Note that APs arrive
+//here very raw, i.e. in real mode with no stack.
+//THE FIRST PART OF THIS PROCEDURE IS EXECUTED BY THE APs IN 16-BIT MODE, HENCE
+//IT IS HAND-ENCODED AS MACHINE CODE.
+//-------------------------------------------------------------------------------------
+//RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+.set L_RunLoopAndMwaitLoop32Offset, L_RunLoopAndMwaitLoop32 - L_LongModeStart
+.set L_RunLoopAndMwaitLoop32JumpOffset, L_RunLoopAndMwaitLoop32Jump - L_LongModeStart
+.set L_RunLoopAndMwaitLoop64Offset, L_RunLoopAndMwaitLoop64 - L_LongModeStart
+.set L_RunLoopAndMwaitLoop64JumpOffset, L_RunLoopAndMwaitLoop64Jump - L_LongModeStart
+.set L_LongModeStartJumpOffset, L_LongModeStartJump - L_RendezvousFunnelProcStart
+
+ASM_GLOBAL ASM_PFX(CpuInitFloatPointUnit)
+
+.globl ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+L_RendezvousFunnelProcStart:
+
+// At this point CS = 0x(vv00) and ip= 0x0.
+ .byte 0x66, 0x8b, 0xe8 // mov ebp, eax
+
+ .byte 0x8c, 0xc8 // mov ax, cs
+ .byte 0x8e, 0xd8 // mov ds, ax
+ .byte 0x8e, 0xc0 // mov es, ax
+ .byte 0x8e, 0xd0 // mov ss, ax
+ .byte 0x33, 0xc0 // xor ax, ax
+ .byte 0x8e, 0xe0 // mov fs, ax
+ .byte 0x8e, 0xe8 // mov gs, ax
+
+// Get APIC ID
+//
+ .byte 0x66, 0xB8
+ .long 0x00000001 // mov eax, 1
+ .byte 0x0F, 0xA2 // cpuid
+ .byte 0x66, 0xC1, 0xEB, 0x18 // shr ebx, 24
+ .byte 0x66, 0x81, 0xE3
+ .long 0x000000FF // and ebx, 0ffh // EBX is APIC ID
+
+// If it is the first time AP wakes up, just record AP's BIST
+// Otherwise, switch to protected mode.
+
+ .byte 0xBE // opcode of mov si, imm16
+ .short InitFlagLocation // mov si, InitFlag
+ .byte 0x66, 0x83, 0x3C, 0x00 // cmp dword ptr [si], 0
+ .byte 0x74, 0x14 // jz flat32Start
+
+// Record BIST information
+//
+ .byte 0xB0, 0x08 // mov al, 8
+ .byte 0xF6, 0xE3 // mul bl
+
+ .byte 0xBE // mov si, imm16
+ .short BistBuffer // mov si, BistBuffer
+ .byte 0x03, 0xF0 // add si, ax
+
+ .byte 0x66, 0xC7, 0x04
+ .long 0x00000001 // mov dword ptr [si], 1 // Set Valid Flag
+ .byte 0x66, 0x89, 0x6C, 0x04 // mov dword ptr [si + 4], ebp // Store BIST value
+
+// Switch to flat mode.
+
+L_flat32Start:
+
+ .byte 0xBF // opcode of mov di, imm16
+ .short BufferStartLocation // mov di, BufferStartLocation
+ .byte 0x66, 0x8B, 0x35 // mov esi,dword ptr [di] // ESI is keeping the start address of wakeup buffer
+
+ .byte 0xBF // opcode of mov di, imm16
+ .short Cr3OffsetLocation // mov di, Cr3Location
+ .byte 0x66, 0x8B, 0x0D // mov ecx,dword ptr [di] // ECX is keeping the value of CR3
+
+ .byte 0xBF // opcode of mov di, imm16
+ .short GdtrLocation // mov di, GdtrProfile
+ .byte 0x66 // db 66h
+ .byte 0x2E, 0x0F, 0x01, 0x15 // lgdt fword ptr cs:[di]
+
+ .byte 0xBF // opcode of mov di, imm16
+ .short IdtrLocation // mov di, IdtrProfile
+ .byte 0x66 // db 66h
+ .byte 0x2E, 0x0F, 0x01, 0x1D // lidt fword ptr cs:[di]
+
+ .byte 0xBF // opcode of mov di, imm16
+ .short L_LongModeStartJumpOffset // Get offset of LongModeStartJump
+ .byte 0x66, 0x8B, 0x3D // mov edi,dword ptr [di] // EDI is keeping the LongModeStart Jump Address
+
+ .byte 0x31, 0xC0 // xor ax, ax
+ .byte 0x8E, 0xD8 // mov ds, ax
+
+ .byte 0x0F, 0x20, 0xC0 // mov eax, cr0 // Get control register 0
+  .byte 0x66, 0x83, 0xC8, 0x03 // or eax, 000000003h // Set PE (bit 0) and MP (bit 1)
+ .byte 0x0F, 0x22, 0xC0 // mov cr0, eax
+
+L_FLAT32_JUMP:
+
+ .byte 0x66, 0x67, 0xEA // far jump
+ .long 0x0 // 32-bit offset
+ .short 0x20 // 16-bit selector
+
+L_NemInit: // 32-bit protected mode entry point
+ .byte 0x66, 0xB8, 0x18, 0x00 // mov ax, 18h
+ .byte 0x66, 0x8E, 0xD8 // mov ds, ax
+ .byte 0x66, 0x8E, 0xC0 // mov es, ax
+ .byte 0x66, 0x8E, 0xE0 // mov fs, ax
+ .byte 0x66, 0x8E, 0xE8 // mov gs, ax
+ .byte 0x66, 0x8E, 0xD0 // mov ss, ax // Flat mode setup.
+
+L_PrepareToGoLongMode64:
+ .byte 0x0F, 0x20, 0xE0 // mov eax, cr4
+  .byte 0x66, 0x0D, 0x20, 0x06 // or ax, 0620h // Set PAE, OSFXSR and OSXMMEXCPT
+ .byte 0x0F, 0x22, 0xE0 // mov cr4, eax
+
+ .byte 0x0F, 0x22, 0xD9 // mov cr3, ecx
+
+ .byte 0xB9
+ .long 0xC0000080 // mov ecx, 0c0000080h // EFER MSR number.
+ .byte 0x0F, 0x32 // rdmsr // Read EFER.
+ .byte 0x0F, 0xBA, 0xE8, 0x08 // bts eax, 8 // Set LME=1.
+ .byte 0x0F, 0x30 // wrmsr // Write EFER.
+
+ .byte 0x0F, 0x20, 0xC0 // mov eax, cr0 // Read CR0.
+ .byte 0x0F, 0xBA, 0xE8, 0x1F // bts eax, 31 // Set PG=1.
+ .byte 0x0F, 0x22, 0xC0 // mov cr0, eax // Write CR0.
+
+L_LONG_JUMP:
+
+ .byte 0x67, 0xEA // far jump
+
+L_LongModeStartJump:
+
+ .long 0x0 // 32-bit offset
+ .short 0x38 // 16-bit selector
+
+L_LongModeStart:
+
+ mov $0x30, %ax
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %ss
+
+L_WaitFirstApTaskAssigned:
+//
+// First INIT-SIPI-SIPI will loop here until DetailedMpInitialization function assigned for each AP
+//
+ pause
+ cmpq $0, CProcedureLocation(%esi)
+ jz L_WaitFirstApTaskAssigned
+
+//
+// Patch the addresses used for jumping between the 32-bit RUN/MONITOR-MWAIT loops and the
+// 64-bit long mode procedure, based on the running address of LongModeStart in physical
+// memory (this code was copied there by the CPU DXE init).
+//
+ xor %rdx, %rdx
+ mov %edi, %eax
+ add $L_RunLoopAndMwaitLoop32Offset, %eax
+ mov %edi, %edx
+ add $L_RunLoopAndMwaitLoop32JumpOffset, %edx
+ mov %eax, (%rdx)
+
+ mov %rdx, %rbp // RBP = 32-bits compatibility mode FAR JUMP m16:32 operand pointer
+
+ mov %edi, %eax
+ add $L_RunLoopAndMwaitLoop64Offset, %eax
+ mov %edi, %edx
+ add $L_RunLoopAndMwaitLoop64JumpOffset, %edx
+ mov %eax, (%rdx)
+
+ //
+ // ProgramStack
+ //
+ xor %rcx, %rcx
+ mov %esi, %edi
+ add $BistBuffer, %edi
+ mov (%edi,%ebx,8), %ecx // RCX = CpuNumber
+
+ mov %esi, %edi
+ add $StackSizeLocation, %edi
+ mov (%edi), %rax
+ inc %rcx
+ mul %rcx // RAX = StackSize * (CpuNumber + 1)
+
+ mov %esi, %edi
+ add $StackStartAddressLocation, %edi
+ mov (%edi), %rdx
+ add %rdx, %rax // RAX = StackStart + StackSize * (CpuNumber + 1)
+
+ mov %rax, %rsp
+ sub $MonitorFilterSize, %rsp // Reserved Monitor data space
+ or $BreakToRunApSignal, %ebx // ebx = #Cpu run signature
+
+ //
+ // Call assembly function to initialize FPU.
+ //
+ mov ASM_PFX(gCpuInitFloatPointUnit)(%rip), %rax
+ sub $0x20, %rsp
+ callq *%rax
+ add $0x20, %rsp
+
+ //
+ // Load C Function pointer and wakeup manner location
+ //
+ mov %esi, %edi
+ add $CProcedureLocation, %edi
+ add $WakeUpApManner, %esi // esi = WakeUpApManner Address Location
+
+L_WakeUpThisAp64:
+
+ movq (%edi),%rax
+
+ test %rax, %rax
+ je L_CheckWakeUpCounterInit64
+
+ push %rbp
+ push %rbx
+ push %rsi
+ push %rdi
+
+ sub $0x20, %rsp
+ callq *%rax
+ add $0x20, %rsp
+
+ pop %rdi
+ pop %rsi
+ pop %rbx
+ pop %rbp
+
+L_CheckWakeUpCounterInit64:
+
+ cmpl $WakeUpApCounterInit, (%esi)
+ jnz L_CheckWakeUpManner64
+
+ //
+ // Initialize MONITOR_MWAIT_DATA data structure per thread
+ //
+ xor %rcx, %rcx
+ mov %rcx, (%rsp) // BreakToRunApSignal
+ mov %rcx, 0x8(%rsp) // HltLoopBreakCounter
+ mov %rcx, 0x10(%rsp) // MwaitLoopBreakCounter
+ mov %rcx, 0x18(%rsp) // RunLoopBreakCounter
+ mov %rcx, 0x20(%rsp) // MwaitLoopBreakCounter32
+ mov %rcx, 0x28(%rsp) // RunLoopBreakCounter32
+ mov %rcx, 0x30(%rsp) // WakeUpApVectorChangeFlag
+ mov %rcx, 0x38(%rsp) // MwaitTargetCstate
+
+L_WaitWakeUpMannerAssigned:
+
+ pause
+ cmpl $WakeUpApCounterInit, (%esi)
+ jz L_WaitWakeUpMannerAssigned
+
+L_CheckWakeUpManner64:
+
+ pause
+ mov (%esi), %edx
+ cmp $WakeUpApPerHltLoop, %edx
+ jz L_HltApLoop64
+
+ cmp $WakeUpApPerMwaitLoop, %edx
+ jz L_ApMwaitLoop64
+
+ cmp $WakeUpApPerRunLoop, %edx
+ jz L_CheckRunSignal64
+
+ jmp L_JumpToCompatibility32Mode
+
+L_ApMwaitLoop64:
+
+ cli
+ mov %rsp, %rax // Set Monitor Address
+ xor %rcx, %rcx
+ xor %rdx, %rdx
+ .byte 0x0F, 1, 0xC8 // MONITOR
+ mov 0x38(%rsp), %rax // Mwait Target C-State per rax[7:4]
+ .byte 0x0F, 1, 0xC9 // MWAIT
+
+L_CheckRunSignal64:
+
+ cmp %rbx, (%rsp) // Check if run signal correct?
+ jnz L_CheckWakeUpManner64 // Unknown break, go checking run manner
+
+ jmp L_WakeUpThisAp64 // Jmp to execute AP task
+
+L_HltApLoop64:
+ cli
+ hlt
+ jmp L_HltApLoop64 // Jump to halt loop
+
+L_JumpToCompatibility32Mode:
+
+ .byte 0xFF, 0x6D, 0x00 // jmp pword ptr [rbp+0] // Far jump to m16:32 for 32-bits compatibility mode
+
+L_RunLoopAndMwaitLoop32Jump:
+
+ .long 0x00 // m32 part of m16:32
+ .short 0x20 // m16 part of m16:32
+
+L_RunLoopAndMwaitLoop32:
+
+ .byte 0x66, 0xB8, 0x18, 0x00 // mov ax, 18h
+ .byte 0x66, 0x8E, 0xD8 // mov ds, ax
+ .byte 0x8e, 0xd0 // mov ss, ax
+
+ .byte 0xF, 0x20, 0xC0 // mov eax, cr0 // Read CR0.
+ .byte 0xF, 0xBA, 0xF0, 0x1F // btr eax, 31 // Reset PG=0.
+ .byte 0xF, 0x22, 0xC0 // mov cr0, eax // Write CR0.
+
+ .byte 0xB9
+ .long 0xC0000080 // mov ecx, 0c0000080h // EFER MSR number.
+ .byte 0xF, 0x32 // rdmsr // Read EFER.
+ .byte 0xF, 0xBA, 0xF0, 0x8 // btr eax, 8 // Reset LME=0.
+ .byte 0xF, 0x30 // wrmsr // Write EFER.
+
+ .byte 0xF, 0x20, 0xE0 // mov eax, cr4
+ .byte 0x24, 0xDF // and al, 0DFh // Reset PAE=0 in CR4 bit 5
+ .byte 0xF, 0x22, 0xE0 // mov cr4, eax
+
+L_CheckWakeUpManner32:
+
+ pause
+  cmpq $WakeUpApPerMwaitLoop32, (%rsi) // Use rsi rather than esi since this is compiled as 64-bit code
+ jnz L_CheckRunSignal32
+
+ cli
+ mov %esp, %eax // Set Monitor Address
+ xor %ecx, %ecx
+ xor %edx, %edx
+ .byte 0xf, 1, 0xc8 // MONITOR
+ mov 0x38(%rsp), %eax // Mwait Target C-State per eax[7:4]
+ .byte 0xf, 1, 0xc9 // MWAIT
+
+
+L_CheckRunSignal32:
+
+ cmp %ebx, (%rsp) // Check if run signal correct?
+ jnz L_CheckWakeUpManner32 // Unknown break, go checking run manner
+
+ .byte 0xF, 0x20, 0xE0 // mov eax, cr4
+ .byte 0xC, 0x20 // or al, 20h // Set PAE=1 in CR4 bit 5
+ .byte 0xF, 0x22, 0xE0 // mov cr4, eax
+
+ .byte 0xB9
+ .long 0xC0000080 // mov ecx, 0c0000080h // EFER MSR number.
+ .byte 0xF, 0x32 // rdmsr // Read EFER.
+ .byte 0xF, 0xBA, 0xE8, 0x8 // bts eax, 8 // Set LME=1.
+ .byte 0xF, 0x30 // wrmsr // Write EFER.
+
+ .byte 0xF, 0x20, 0xC0 // mov eax, cr0 // Read CR0.
+ .byte 0xF, 0xBA, 0xE8, 0x1F // bts eax, 31 // Set PG=1.
+ .byte 0xF, 0x22, 0xC0 // mov cr0, eax // Write CR0.
+
+ .byte 0x67, 0xEA // far jump back to 64-bits long mode
+
+L_RunLoopAndMwaitLoop64Jump:
+
+ .long 0x00 // 32-bit offset
+ .short 0x38 // 16-bit selector
+
+L_RunLoopAndMwaitLoop64:
+
+ mov $0x30, %ax
+ mov %ax, %ds
+ mov %ax, %ss
+
+ jmp L_WakeUpThisAp64
+
+.globl ASM_PFX(gCpuInitFloatPointUnit)
+ASM_PFX(gCpuInitFloatPointUnit):
+.long 0x0
+.long 0x0
+
+L_RendezvousFunnelProcEnd:
+
+//-------------------------------------------------------------------------------------
+// AsmGetAddressMap (&AddressMap);
+//-------------------------------------------------------------------------------------
+.set L_NemInitOffset, L_NemInit - L_RendezvousFunnelProcStart
+.set L_FLAT32_JUMPOffset, L_FLAT32_JUMP - L_RendezvousFunnelProcStart
+.set L_LongModeStartOffset, L_LongModeStart - L_RendezvousFunnelProcStart
+.set L_LONG_JUMPOffset, L_LONG_JUMP - L_RendezvousFunnelProcStart
+.set L_RendezvousFunnelProcEndOffset, L_RendezvousFunnelProcEnd - L_RendezvousFunnelProcStart
+
+.globl ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+ leaq ASM_PFX(RendezvousFunnelProc)(%rip), %rax
+ mov %rax, (%rcx)
+ xor %rax, %rax
+ mov $L_NemInitOffset, %eax
+ mov %rax, 0x8(%rcx)
+ mov $L_FLAT32_JUMPOffset, %eax
+ mov %rax, 0x10(%rcx)
+ mov $L_LongModeStartOffset, %eax
+ mov %rax, 0x18(%rcx)
+ mov $L_LONG_JUMPOffset, %eax
+ mov %rax, 0x20(%rcx)
+ mov $L_RendezvousFunnelProcEndOffset, %eax
+ mov %rax, 0x28(%rcx)
+ retq
+
+#undef EFI_SUCCESS
+#undef EFI_NOT_READY
+#define EFI_SUCCESS 0
+#define EFI_NOT_READY 0x8000000000000006
+
+.globl ASM_PFX(AsmTryAcquireMPLock)
+ASM_PFX(AsmTryAcquireMPLock):
+
+ mov $NotVacantFlag, %al
+
+ xchg %al, (%rcx)
+ cmp $VacantFlag, %al
+ jz L_Vacant
+
+ mov $EFI_NOT_READY, %rax
+ jmp L_Done
+
+L_Vacant:
+ mov $EFI_SUCCESS, %rax
+L_Done:
+ retq
+
+.globl ASM_PFX(AsmAcquireMPLock)
+ASM_PFX(AsmAcquireMPLock):
+
+ mov $NotVacantFlag, %al
+L_TryGetLock:
+ xchg %al, (%rcx)
+ cmp $VacantFlag, %al
+ jz L_LockObtained
+
+ pause
+ jmp L_TryGetLock
+
+L_LockObtained:
+ retq
+
+.globl ASM_PFX(AsmReleaseMPLock)
+ASM_PFX(AsmReleaseMPLock):
+
+ mov $VacantFlag, %al
+ xchg %al, (%rcx)
+
+ retq
+
+//-------------------------------------------------------------------------------------
+//AsmExchangeRole procedure follows. This procedure is executed by the current BSP,
+//which is about to become an AP. It switches its stack with the current AP.
+//AsmExchangeRole (IN CPU_EXCHANGE_INFO *MyInfo, IN CPU_EXCHANGE_INFO *OthersInfo);
+//-------------------------------------------------------------------------------------
+#define CPU_SWITCH_STATE_IDLE 0
+#define CPU_SWITCH_STATE_STORED 1
+#define CPU_SWITCH_STATE_LOADED 2
+
+.globl ASM_PFX(AsmExchangeRole)
+ASM_PFX(AsmExchangeRole):
+  // DO NOT call other functions in this function, since two CPUs may use one stack
+  // at the same time. If one CPU tries to call a function, the stack will be corrupted.
+
+ push %rax
+ push %rbx
+ push %rcx
+ push %rdx
+ push %rsi
+ push %rdi
+ push %rbp
+ push %r8
+ push %r9
+ push %r10
+ push %r11
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %cr0, %rax
+ push %rax
+
+ mov %cr4, %rax
+ push %rax
+
+ // rsi contains MyInfo pointer
+ mov %rcx, %rsi
+
+ // rdi contains OthersInfo pointer
+ mov %rdx, %rdi
+
+  // Store EFLAGS on the stack; store the GDTR and IDTR registers into MyInfo
+ pushfq
+ sgdt 16(%rsi)
+ sidt 26(%rsi)
+
+  // Store its stack pointer into MyInfo
+ mov %rsp, 8(%rsi)
+
+ // update its switch state to STORED
+ mov $NotVacantFlag, %al
+L_TryLock1:
+ xchg %al, (%rsi)
+ cmp $VacantFlag, %al
+ jz L_LockObtained1
+ pause
+ jmp L_TryLock1
+
+L_LockObtained1:
+ movb $CPU_SWITCH_STATE_STORED, 1(%rsi)
+ xchg %al, (%rsi)
+
+L_WaitForOtherStored:
+  // wait until the other CPU finishes storing its state
+ mov $NotVacantFlag, %al
+L_TryLock2:
+ xchg %al, (%rdi)
+ cmp $VacantFlag, %al
+ jz L_LockObtained2
+ pause
+ jmp L_TryLock2
+
+L_LockObtained2:
+ mov 1(%rdi), %bl
+ xchg %al, (%rdi)
+ cmp $CPU_SWITCH_STATE_STORED, %bl
+ jb L_WaitForOtherStored
+
+  // Since the other CPU has already stored its state, load it
+ // load GDTR value
+ lgdt 16(%rdi)
+
+ // load IDTR value
+ lidt 26(%rdi)
+
+ // load its future StackPointer
+ mov 8(%rdi), %rsp
+
+ // update its switch state to LOADED
+ mov $NotVacantFlag, %al
+L_TryLock3:
+ xchg %al, (%rsi)
+ cmp $VacantFlag, %al
+ jz L_LockObtained3
+ pause
+ jmp L_TryLock3
+
+L_LockObtained3:
+ movb $CPU_SWITCH_STATE_LOADED, 1(%rsi)
+ xchg %al, (%rsi)
+
+L_WaitForOtherLoaded:
+  // wait until the other CPU finishes loading its new state,
+  // otherwise the data on the stack may be corrupted
+ mov $NotVacantFlag, %al
+L_TryLock4:
+ xchg %al, (%rdi)
+ cmp $VacantFlag, %al
+ jz L_LockObtained4
+ pause
+ jmp L_TryLock4
+
+L_LockObtained4:
+ mov 1(%rdi), %bl
+ xchg %al, (%rdi)
+ cmp $CPU_SWITCH_STATE_LOADED, %bl
+ jb L_WaitForOtherLoaded
+
+ // since the other CPU already get the data it want, leave this procedure
+ popfq
+
+ pop %rax
+ mov %rax, %cr4
+
+ pop %rax
+ mov %rax, %cr0
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %r11
+ pop %r10
+ pop %r9
+ pop %r8
+ pop %rbp
+ pop %rdi
+ pop %rsi
+ pop %rdx
+ pop %rcx
+ pop %rbx
+ pop %rax
+
+ retq
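For reference, a standalone sketch (not part of the patch above) of the per-AP stack assignment computed in the L_LongModeStart code path: RSP = StackStart + StackSize * (CpuNumber + 1) - MonitorFilterSize, where the top MonitorFilterSize bytes of each AP's stack are reserved as its MONITOR/MWAIT data area. The numeric values below are illustrative only.

  /* Standalone sketch, not part of the patch: per-AP stack pointer arithmetic. */
  #include <stdio.h>
  #include <stdint.h>

  int main (void)
  {
    uint64_t StackStart        = 0x60000000ULL;  /* example value only           */
    uint64_t StackSizePerProc  = 0x2000ULL;      /* example value only           */
    uint64_t MonitorFilterSize = 0x40ULL;        /* MonitorFilterSize equate     */
    uint32_t CpuNumber         = 3;              /* read from BistBuffer[ApicId] */

    /* RAX = StackSize * (CpuNumber + 1) + StackStart; RSP = RAX - MonitorFilterSize */
    uint64_t Rsp = StackStart + StackSizePerProc * (CpuNumber + 1) - MonitorFilterSize;

    printf ("AP %u initial RSP = 0x%llx\n", CpuNumber, (unsigned long long) Rsp);
    return 0;
  }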
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpFuncs.asm b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpFuncs.asm
new file mode 100644
index 0000000000..ead13793f0
--- /dev/null
+++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/MpFuncs.asm
@@ -0,0 +1,591 @@
+;; @file
+; This is the assembly code for EM64T MP support.
+;
+; Copyright (c) 2005 - 2016, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+;
+;;
+
+include MpEqu.inc
+CpuInitFloatPointUnit PROTO C
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute this procedure. It
+;serializes the AP processors through an init sequence. Note that APs arrive
+;here very raw, i.e. in real mode with no stack.
+;THE FIRST PART OF THIS PROCEDURE IS EXECUTED BY THE APs IN 16-BIT MODE, HENCE
+;IT IS HAND-ENCODED AS MACHINE CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+text SEGMENT
+
+RendezvousFunnelProc PROC PUBLIC
+RendezvousFunnelProcStart::
+
+; At this point CS = 0x(vv00) and ip= 0x0.
+ db 66h, 08bh, 0e8h ; mov ebp, eax
+
+ db 8ch, 0c8h ; mov ax, cs
+ db 8eh, 0d8h ; mov ds, ax
+ db 8eh, 0c0h ; mov es, ax
+ db 8eh, 0d0h ; mov ss, ax
+ db 33h, 0c0h ; xor ax, ax
+ db 8eh, 0e0h ; mov fs, ax
+ db 8eh, 0e8h ; mov gs, ax
+
+; Get APIC ID
+;
+ db 66h, 0B8h
+ dd 00000001h ; mov eax, 1
+ db 0Fh, 0A2h ; cpuid
+ db 66h, 0C1h, 0EBh, 18h ; shr ebx, 24
+ db 66h, 81h, 0E3h
+ dd 000000FFh ; and ebx, 0ffh ; EBX is APIC ID
+
+; If it is the first time AP wakes up, just record AP's BIST
+; Otherwise, switch to protected mode.
+
+ db 0BEh ; opcode of mov si, imm16
+ dw InitFlagLocation ; mov si, InitFlag
+ db 66h, 83h, 3Ch, 00h ; cmp dword ptr [si], 0
+ db 74h ; opcode of jz
+ db flat32Start - ($ + 1) ; jz flat32Start
+
+; Record BIST information
+;
+ db 0B0h, 08h ; mov al, 8
+ db 0F6h, 0E3h ; mul bl
+
+ db 0BEh ; opcode of mov si, imm16
+ dw BistBuffer ; mov si, BistBuffer
+ db 03h, 0F0h ; add si, ax
+
+ db 66h, 0C7h, 04h
+ dd 00000001h ; mov dword ptr [si], 1 ; Set Valid Flag
+ db 66h, 89h, 6Ch, 04h ; mov dword ptr [si + 4], ebp ; Store BIST value
+
+;
+; Switch to flat mode.
+;
+flat32Start::
+
+ db 0BFh ; opcode of mov di, imm16
+ dw BufferStartLocation ; mov di, BufferStartLocation
+ db 66h, 8Bh, 35h ; mov esi,dword ptr [di] ; ESI is keeping the start address of wakeup buffer
+
+ db 0BFh ; opcode of mov di, imm16
+ dw Cr3OffsetLocation ; mov di, Cr3Location
+ db 66h, 8Bh, 0Dh ; mov ecx,dword ptr [di] ; ECX is keeping the value of CR3
+
+ db 0BFh ; opcode of mov di, imm16
+ dw GdtrLocation ; mov di, GdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 15h ; lgdt fword ptr cs:[di]
+
+ db 0BFh ; opcode of mov di, imm16
+ dw IdtrLocation ; mov di, IdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 1Dh ; lidt fword ptr cs:[di]
+
+ db 0BFh ; opcode of mov di, imm16
+ dw LongModeStartJump - RendezvousFunnelProcStart ; Get offset of LongModeStartJump
+ db 66h, 8Bh, 3Dh ; mov edi,dword ptr [di] ; EDI is keeping the LongModeStart Jump Address
+
+ db 31h, 0C0h ; xor ax, ax
+ db 8Eh, 0D8h ; mov ds, ax
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0
+ db 66h, 83h, 0C8h, 03h ; or eax, 000000003h ; Set PE bit (bit #0) and MP
+ db 0Fh, 22h, 0C0h ; mov cr0, eax
+
+FLAT32_JUMP::
+
+ db 66h, 67h, 0EAh ; far jump
+ dd 0h ; 32-bit offset
+ dw 20h ; 16-bit selector
+
+NemInit:: ; 32-bits protected mode entry point
+
+ db 66h, 0B8h, 18h, 00h ; mov ax, 18h
+ db 66h, 8Eh, 0D8h ; mov ds, ax
+ db 66h, 8Eh, 0C0h ; mov es, ax
+ db 66h, 8Eh, 0E0h ; mov fs, ax
+ db 66h, 8Eh, 0E8h ; mov gs, ax
+ db 66h, 8Eh, 0D0h ; mov ss, ax ; Flat mode setup.
+
+
+PrepareToGoLongMode64::
+
+ db 0Fh, 20h, 0E0h ; mov eax, cr4
+ db 66h, 0Dh, 020h, 06h ; or ax, 0620h ; Set PAE=1, OSFXSR=1, OSXMMEXCPT=1.
+ db 0Fh, 22h, 0E0h ; mov cr4, eax
+
+ db 0Fh, 22h, 0D9h ; mov cr3, ecx
+
+ db 0B9h
+ dd 0C0000080h ; mov ecx, 0c0000080h ; EFER MSR number.
+ db 0Fh, 32h ; rdmsr ; Read EFER.
+ db 0Fh, 0BAh, 0E8h, 08h ; bts eax, 8 ; Set LME=1.
+ db 0Fh, 30h ; wrmsr ; Write EFER.
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Read CR0.
+ db 0Fh, 0BAh, 0E8h, 1Fh ; bts eax, 31 ; Set PG=1.
+ db 0Fh, 22h, 0C0h ; mov cr0, eax ; Write CR0.
+
+LONG_JUMP::
+
+ db 67h, 0EAh ; far jump
+
+LongModeStartJump:
+
+ dd 0h ; 32-bit offset
+ dw 38h ; 16-bit selector
+
+
+LongModeStart::
+
+ mov ax, 30h
+ mov ds, ax
+ mov es, ax
+ mov ss, ax
+
+WaitFirstApTaskAssigned::
+;
+; First INIT-SIPI-SIPI will loop here until DetailedMpInitialization function assigned for each AP
+;
+ pause
+ cmp qword ptr [esi+CProcedureLocation], 0
+ jz WaitFirstApTaskAssigned
+
+;
+; Patch the addresses used for jumping between the 32-bit RUN/MONITOR-MWAIT loops and the
+; 64-bit long mode procedure, based on the running address of LongModeStart in physical
+; memory (this code was copied there by the CPU DXE init).
+;
+ xor rdx, rdx
+ mov eax, edi
+ add eax, RunLoopAndMwaitLoop32 - LongModeStart
+ mov edx, edi
+ add edx, RunLoopAndMwaitLoop32Jump - LongModeStart
+ mov dword ptr [rdx], eax
+
+ mov rbp, rdx ; RBP = 32-bits compatibility mode FAR JUMP m16:32 operand pointer
+
+ mov eax, edi
+ add eax, RunLoopAndMwaitLoop64 - LongModeStart
+ mov edx, edi
+ add edx, RunLoopAndMwaitLoop64Jump - LongModeStart
+ mov dword ptr [rdx], eax
+
+;
+; ProgramStack
+;
+ xor rcx, rcx
+ mov edi, esi
+ add edi, BistBuffer
+ mov ecx, dword ptr [edi + 8 * ebx] ; RCX = CpuNumber
+
+ mov edi, esi
+ add edi, StackSizeLocation
+ mov rax, qword ptr [edi]
+ inc rcx
+ mul rcx ; RAX = StackSize * (CpuNumber + 1)
+
+ mov edi, esi
+ add edi, StackStartAddressLocation
+ mov rdx, qword ptr [edi]
+ add rax, rdx ; RAX = StackStart + StackSize * (CpuNumber + 1)
+
+ mov rsp, rax
+ sub rsp, MonitorFilterSize ; Reserved Monitor data space
+ or ebx, BreakToRunApSignal ; ebx = #Cpu run signature
+
+;
+; Call assembly function to initialize FPU.
+;
+ mov rax, CpuInitFloatPointUnit
+ sub rsp, 20h
+ call rax
+ add rsp, 20h
+
+;
+; Load C Function pointer and wakeup manner location
+;
+ mov edi, esi
+ add edi, CProcedureLocation
+ add esi, WakeUpApManner ; esi = WakeUpApManner Address Location
+
+WakeUpThisAp64::
+
+ mov rax, qword ptr [edi]
+
+ test rax, rax
+ jz CheckWakeUpCounterInit64
+
+ push rbp
+ push rbx
+ push rsi
+ push rdi
+
+ sub rsp, 20h
+ call rax
+ add rsp, 20h
+
+ pop rdi
+ pop rsi
+ pop rbx
+ pop rbp
+
+CheckWakeUpCounterInit64::
+
+ cmp dword ptr [esi], WakeUpApCounterInit
+ jnz CheckWakeUpManner64
+
+;
+; Initialize MONITOR_MWAIT_DATA data structure per thread
+;
+ xor rcx, rcx
+ mov qword ptr [rsp + 0], rcx ; BreakToRunApSignal
+ mov qword ptr [rsp + 8], rcx ; HltLoopBreakCounter
+ mov qword ptr [rsp + 16], rcx ; MwaitLoopBreakCounter
+ mov qword ptr [rsp + 24], rcx ; RunLoopBreakCounter
+ mov qword ptr [rsp + 32], rcx ; MwaitLoopBreakCounter32
+ mov qword ptr [rsp + 40], rcx ; RunLoopBreakCounter32
+ mov qword ptr [rsp + 48], rcx ; WakeUpApVectorChangeFlag
+ mov qword ptr [rsp + 56], rcx ; MwaitTargetCstate
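+;
+; The eight QWORDs cleared above imply a per-thread layout along these lines
+; (field names taken from the comments; the actual MONITOR_MWAIT_DATA structure
+; is presumably defined on the C side of this driver):
+;   typedef struct {
+;     UINT64  BreakToRunApSignal;         // offset  0
+;     UINT64  HltLoopBreakCounter;        // offset  8
+;     UINT64  MwaitLoopBreakCounter;      // offset 16
+;     UINT64  RunLoopBreakCounter;        // offset 24
+;     UINT64  MwaitLoopBreakCounter32;    // offset 32
+;     UINT64  RunLoopBreakCounter32;      // offset 40
+;     UINT64  WakeUpApVectorChangeFlag;   // offset 48
+;     UINT64  MwaitTargetCstate;          // offset 56
+;   } MONITOR_MWAIT_DATA;
+;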
+
+WaitWakeUpMannerAssigned::
+
+ pause
+ cmp dword ptr [esi], WakeUpApCounterInit
+ jz WaitWakeUpMannerAssigned
+
+CheckWakeUpManner64::
+
+ pause
+ mov edx, dword ptr [esi]
+ cmp edx, WakeUpApPerHltLoop
+ jz HltApLoop64
+
+ cmp edx, WakeUpApPerMwaitLoop
+ jz ApMwaitLoop64
+
+ cmp edx, WakeUpApPerRunLoop
+ jz CheckRunSignal64
+
+ jmp JumpToCompatibility32Mode
+
+ApMwaitLoop64::
+
+ cli
+ mov rax, rsp ; Set Monitor Address
+ xor rcx, rcx
+ xor rdx, rdx
+ DB 0fh, 1, 0c8h ; MONITOR
+ mov rax, qword ptr [rsp + 56] ; Mwait Target C-State per rax[7:4]
+ DB 0fh, 1, 0c9h ; MWAIT
+
+CheckRunSignal64::
+
+    cmp     qword ptr [rsp], rbx                    ; Check whether the run signal is correct
+    jnz     CheckWakeUpManner64                     ; Unknown break, go check the wakeup manner again
+
+ jmp WakeUpThisAp64 ; Jmp to execute AP task
+
+HltApLoop64::
+
+ cli
+ hlt
+ jmp HltApLoop64 ; Jump to halt loop
+
+
+JumpToCompatibility32Mode::
+
+ db 0FFh, 6Dh, 0 ; jmp pword ptr [rbp+0] ; Far jump to m16:32 for 32-bits compatibility mode
+
+RunLoopAndMwaitLoop32Jump:
+
+ dd 0h ; m32 part of m16:32
+ dw 20h ; m16 part of m16:32
+
+RunLoopAndMwaitLoop32::
+
+ db 66h, 0B8h, 18h, 00h ; mov ax, 18h
+ db 66h, 8Eh, 0D8h ; mov ds, ax
+ db 8eh, 0d0h ; mov ss, ax
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Read CR0.
+ db 0Fh, 0BAh, 0F0h, 1Fh ; btr eax, 31 ; Reset PG=0.
+ db 0Fh, 22h, 0C0h ; mov cr0, eax ; Write CR0.
+
+ db 0B9h
+ dd 0C0000080h ; mov ecx, 0c0000080h ; EFER MSR number.
+ db 0Fh, 32h ; rdmsr ; Read EFER.
+ db 0Fh, 0BAh, 0F0h, 08h ; btr eax, 8 ; Reset LME=0.
+ db 0Fh, 30h ; wrmsr ; Write EFER.
+
+ db 0Fh, 20h, 0E0h ; mov eax, cr4
+ db 24h, 0DFh ; and al, 0DFh ; Reset PAE=0 in CR4 bit 5
+ db 0Fh, 22h, 0E0h ; mov cr4, eax
+
+CheckWakeUpManner32::
+
+ pause
+    cmp     dword ptr [rsi], WakeUpApPerMwaitLoop32 ; Use rsi for esi since this is assembled as 64-bit code
+ jnz CheckRunSignal32
+
+ cli
+ mov eax, esp ; Set Monitor Address
+ xor ecx, ecx
+ xor edx, edx
+ DB 0fh, 1, 0c8h ; MONITOR
+ mov eax, dword ptr [rsp + 56] ; Mwait Target C-State per eax[7:4]
+ DB 0fh, 1, 0c9h ; MWAIT
+
+
+CheckRunSignal32::
+
+    cmp     dword ptr [rsp], ebx                    ; Check whether the run signal is correct
+    jnz     CheckWakeUpManner32                     ; Unknown break, go check the wakeup manner again
+
+ db 0Fh, 20h, 0E0h ; mov eax, cr4
+ db 0Ch, 20h ; or al, 20h ; Set PAE=1 in CR4 bit 5
+ db 0Fh, 22h, 0E0h ; mov cr4, eax
+
+ db 0B9h
+ dd 0C0000080h ; mov ecx, 0c0000080h ; EFER MSR number.
+ db 0Fh, 32h ; rdmsr ; Read EFER.
+ db 0Fh, 0BAh, 0E8h, 08h ; bts eax, 8 ; Set LME=1.
+ db 0Fh, 30h ; wrmsr ; Write EFER.
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Read CR0.
+ db 0Fh, 0BAh, 0E8h, 1Fh ; bts eax, 31 ; Set PG=1.
+ db 0Fh, 22h, 0C0h ; mov cr0, eax ; Write CR0.
+
+ db 67h, 0EAh ; far jump back to 64-bits long mode
+
+RunLoopAndMwaitLoop64Jump:
+
+ dd 0h ; 32-bit offset
+ dw 38h ; 16-bit selector
+
+RunLoopAndMwaitLoop64::
+
+ mov ax, 30h
+ mov ds, ax
+ mov ss, ax
+
+ jmp WakeUpThisAp64
+
+RendezvousFunnelProc ENDP
+RendezvousFunnelProcEnd::
+
+
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+AsmGetAddressMap PROC PUBLIC
+
+ mov rax, offset RendezvousFunnelProcStart
+ mov qword ptr [rcx], rax
+ mov qword ptr [rcx+8h], NemInit - RendezvousFunnelProcStart
+ mov qword ptr [rcx+10h], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov qword ptr [rcx+18h], LongModeStart - RendezvousFunnelProcStart
+ mov qword ptr [rcx+20h], LONG_JUMP - RendezvousFunnelProcStart
+ mov qword ptr [rcx+28h], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+
+ ret
+
+AsmGetAddressMap ENDP
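+;
+; The QWORD stores above correspond to MP_ASSEMBLY_ADDRESS_MAP in ProcessorDef.h:
+;   [rcx + 00h] RendezvousFunnelAddress = RendezvousFunnelProcStart
+;   [rcx + 08h] PModeEntryOffset        = NemInit (32-bit protected mode entry)
+;   [rcx + 10h] FlatJumpOffset          = FLAT32_JUMP
+;   [rcx + 18h] LModeEntryOffset        = LongModeStart
+;   [rcx + 20h] LongJumpOffset          = LONG_JUMP
+;   [rcx + 28h] Size                    = RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+;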
+
+AsmAcquireMPLock PROC PUBLIC
+
+ mov al, NotVacantFlag
+TryGetLock:
+ xchg al, byte ptr [rcx]
+ cmp al, VacantFlag
+ jz LockObtained
+
+ pause
+ jmp TryGetLock
+
+LockObtained:
+ ret
+
+AsmAcquireMPLock ENDP
+
+AsmReleaseMPLock PROC PUBLIC
+
+ mov al, VacantFlag
+ xchg al, byte ptr [rcx]
+
+ ret
+
+AsmReleaseMPLock ENDP
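+;
+; AsmAcquireMPLock/AsmReleaseMPLock form a simple XCHG-based spin lock on the
+; byte at [RCX]. A minimal C-side usage sketch (the prototypes are inferred
+; from the code above, not copied from a header):
+;   AsmAcquireMPLock (&Lock);   // spins with PAUSE until the byte reads VacantFlag
+;   ... critical section ...
+;   AsmReleaseMPLock (&Lock);   // stores VacantFlag back
+;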
+
+;-------------------------------------------------------------------------------------
+;AsmExchangeRole procedure follows. This procedure is executed by the current BSP, which is
+;about to become an AP. It switches its stack with the current AP.
+;AsmExchangeRole (IN CPU_EXCHANGE_INFO *MyInfo, IN CPU_EXCHANGE_INFO *OthersInfo);
+;-------------------------------------------------------------------------------------
+CPU_SWITCH_STATE_IDLE equ 0
+CPU_SWITCH_STATE_STORED equ 1
+CPU_SWITCH_STATE_LOADED equ 2
+
+AsmExchangeRole PROC PUBLIC
+    ; DO NOT call other functions in this function, since 2 CPUs may use 1 stack
+    ; at the same time. If 1 CPU tries to call a function, the stack will be corrupted.
+
+ push rax
+ push rbx
+ push rcx
+ push rdx
+ push rsi
+ push rdi
+ push rbp
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+
+ mov rax, cr0
+ push rax
+
+ mov rax, cr4
+ push rax
+
+ ; rsi contains MyInfo pointer
+ mov rsi, rcx
+
+ ; rdi contains OthersInfo pointer
+ mov rdi, rdx
+
+    ;Store the EFLAGS, GDTR and IDTR registers to the stack
+ pushfq
+ sgdt fword ptr [rsi + 16]
+ sidt fword ptr [rsi + 26]
+
+    ; Store its stack pointer
+ mov qword ptr [rsi + 8], rsp
+
+ ; update its switch state to STORED
+ mov al, NotVacantFlag
+TryLock1:
+ xchg al, byte ptr [rsi]
+ cmp al, VacantFlag
+ jz LockObtained1
+ pause
+ jmp TryLock1
+
+LockObtained1:
+ mov byte ptr [rsi + 1], CPU_SWITCH_STATE_STORED
+ xchg al, byte ptr [rsi]
+
+WaitForOtherStored::
+    ; wait until the other CPU finishes storing its state
+ mov al, NotVacantFlag
+TryLock2:
+ xchg al, byte ptr [rdi]
+ cmp al, VacantFlag
+ jz LockObtained2
+ PAUSE32
+ jmp TryLock2
+
+LockObtained2:
+ mov bl, byte ptr [rdi + 1]
+ xchg al, byte ptr [rdi]
+ cmp bl, CPU_SWITCH_STATE_STORED
+ jb WaitForOtherStored
+
+    ; Since the other CPU has already stored its state, load it
+ ; load GDTR value
+ lgdt fword ptr [rdi + 16]
+
+ ; load IDTR value
+ lidt fword ptr [rdi + 26]
+
+ ; load its future StackPointer
+ mov rsp, qword ptr [rdi + 8]
+
+ ; update its switch state to LOADED
+ mov al, NotVacantFlag
+TryLock3:
+ xchg al, byte ptr [rsi]
+ cmp al, VacantFlag
+ jz LockObtained3
+ PAUSE32
+ jmp TryLock3
+
+LockObtained3:
+ mov byte ptr [rsi+1], CPU_SWITCH_STATE_LOADED
+ xchg al, byte ptr [rsi]
+
+WaitForOtherLoaded::
+    ; wait until the other CPU finishes loading its new state,
+    ; otherwise the data on the stack may be corrupted
+ mov al, NotVacantFlag
+TryLock4:
+ xchg al, byte ptr [rdi]
+ cmp al, VacantFlag
+ jz LockObtained4
+ PAUSE32
+ jmp TryLock4
+
+LockObtained4:
+ mov bl, byte ptr [rdi+1]
+ xchg al, byte ptr [rdi]
+ cmp bl, CPU_SWITCH_STATE_LOADED
+ jb WaitForOtherLoaded
+
+    ; since the other CPU has already gotten the data it wants, leave this procedure
+ popfq
+
+ pop rax
+ mov cr4, rax
+
+ pop rax
+ mov cr0, rax
+
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ pop rbp
+ pop rdi
+ pop rsi
+ pop rdx
+ pop rcx
+ pop rbx
+ pop rax
+
+ ret
+AsmExchangeRole ENDP
+
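+;-------------------------------------------------------------------------------------
+; AsmWaitMem (WaitAddress in RCX, WaitValue in RDX) - inferred from the code below:
+; spins with PAUSE while the QWORD at [RCX] still equals RDX, and returns once the
+; memory location has been changed to any other value.
+;-------------------------------------------------------------------------------------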
+AsmWaitMem PROC PUBLIC
+AsmWaitMemS:
+ pause
+ cmp qword ptr [rcx], rdx
+ jz AsmWaitMemS
+ ret
+
+AsmWaitMem ENDP
+
+text ENDS
+
+END
+
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/ProcessorDef.h b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/ProcessorDef.h new file mode 100644 index 0000000000..406e891c17 --- /dev/null +++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/ProcessorDef.h @@ -0,0 +1,53 @@ +/** @file
+ Definition for EM64T processor.
+
+ Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _PROCESSOR_DEF_H
+#define _PROCESSOR_DEF_H
+
+#pragma pack(1)
+
+typedef struct {
+ UINT16 Offset15To0;
+ UINT16 SegmentSelector;
+ UINT16 Attributes;
+ UINT16 Offset31To16;
+ UINT32 Offset63To32;
+ UINT32 Reserved;
+} INTERRUPT_GATE_DESCRIPTOR;
+
+#pragma pack()
+
+typedef struct {
+ UINT8 *RendezvousFunnelAddress;
+ UINTN PModeEntryOffset;
+ UINTN FlatJumpOffset;
+ UINTN LModeEntryOffset;
+ UINTN LongJumpOffset;
+ UINTN Size;
+} MP_ASSEMBLY_ADDRESS_MAP;
+
+/**
+ Get address map of RendezvousFunnelProc.
+
+ @param[out] AddressMap Output buffer for address map information
+
+**/
+VOID
+AsmGetAddressMap (
+ OUT MP_ASSEMBLY_ADDRESS_MAP *AddressMap
+ );
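+
+//
+// Minimal usage sketch (WakeUpBuffer is an illustrative name, not taken from
+// this header): MP init code retrieves the funnel's address map, copies the
+// funnel code to a low-memory wakeup buffer, and uses the recorded offsets to
+// locate the mode-switch jump instructions that need patching.
+//
+//   MP_ASSEMBLY_ADDRESS_MAP  AddressMap;
+//
+//   AsmGetAddressMap (&AddressMap);
+//   CopyMem ((VOID *) (UINTN) WakeUpBuffer, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
+//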
+
+#endif
+
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/VirtualMemory.h b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/VirtualMemory.h new file mode 100644 index 0000000000..48a9159e38 --- /dev/null +++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/VirtualMemory.h @@ -0,0 +1,131 @@ +/** @file
+ x64 Long Mode Virtual Memory Management Definitions.
+
+ Copyright (c) 2004 - 2016, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _VIRTUAL_MEMORY_H_
+#define _VIRTUAL_MEMORY_H_
+
+#pragma pack(1)
+
+///
+/// Page-Map Level-4 Offset (PML4) and
+/// Page-Directory-Pointer Offset (PDPE) entries 4K & 2MB
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; ///< 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; ///< 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; ///< 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; ///< 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; ///< 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; ///< 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Reserved : 1; ///< Reserved
+ UINT64 MustBeZero : 2; ///< Must Be Zero
+ UINT64 Available : 3; ///< Available for use by system software
+ UINT64 PageTableBaseAddress : 40; ///< Page Table Base Address
+ UINT64 AvabilableHigh : 11; ///< Available for use by system software
+ UINT64 Nx : 1; ///< No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} x64_PAGE_MAP_AND_DIRECTORY_POINTER_2MB_4K;
+
+///
+/// Page-Directory Offset 4K
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; ///< 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; ///< 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; ///< 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; ///< 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; ///< 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; ///< 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Reserved : 1; ///< Reserved
+ UINT64 MustBeZero : 1; ///< Must Be Zero
+ UINT64 Reserved2 : 1; ///< Reserved
+ UINT64 Available : 3; ///< Available for use by system software
+ UINT64 PageTableBaseAddress : 40; ///< Page Table Base Address
+ UINT64 AvabilableHigh : 11; ///< Available for use by system software
+ UINT64 Nx : 1; ///< No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} x64_PAGE_DIRECTORY_ENTRY_4K;
+
+///
+/// Page Table Entry 4K
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; ///< 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; ///< 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; ///< 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; ///< 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; ///< 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; ///< 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Dirty : 1; ///< 0 = Not Dirty, 1 = written by processor on access to page
+ UINT64 PAT : 1; ///< 0 = Ignore Page Attribute Table
+ UINT64 Global : 1; ///< 0 = Not global page, 1 = global page TLB not cleared on CR3 write
+ UINT64 Available : 3; ///< Available for use by system software
+ UINT64 PageTableBaseAddress : 40; ///< Page Table Base Address
+ UINT64 AvabilableHigh : 11; ///< Available for use by system software
+ UINT64 Nx : 1; ///< 0 = Execute Code, 1 = No Code Execution
+ } Bits;
+ UINT64 Uint64;
+} x64_PAGE_TABLE_ENTRY_4K;
+
+///
+/// Page Table Entry 2MB
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; ///< 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; ///< 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; ///< 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; ///< 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; ///< 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; ///< 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Dirty : 1; ///< 0 = Not Dirty, 1 = written by processor on access to page
+ UINT64 MustBe1 : 1; ///< Must be 1
+ UINT64 Global : 1; ///< 0 = Not global page, 1 = global page TLB not cleared on CR3 write
+ UINT64 Available : 3; ///< Available for use by system software
+    UINT64  PAT : 1;                      ///< Page Attribute Table bit
+    UINT64  MustBeZero : 8;               ///< Must be zero
+ UINT64 PageTableBaseAddress : 31; ///< Page Table Base Address
+ UINT64 AvabilableHigh : 11; ///< Available for use by system software
+ UINT64 Nx : 1; ///< 0 = Execute Code, 1 = No Code Execution
+ } Bits;
+ UINT64 Uint64;
+} x64_PAGE_TABLE_ENTRY_2M;
+
+typedef struct {
+ UINT64 Present : 1; ///< 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; ///< 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; ///< 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; ///< 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; ///< 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; ///< 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Dirty : 1; ///< 0 = Not Dirty, 1 = written by processor on access to page
+ UINT64 Reserved : 57;
+} x64_PAGE_TABLE_ENTRY_COMMON;
+
+typedef union {
+ x64_PAGE_TABLE_ENTRY_4K Page4k;
+ x64_PAGE_TABLE_ENTRY_2M Page2Mb;
+ x64_PAGE_TABLE_ENTRY_COMMON Common;
+} x64_PAGE_TABLE_ENTRY;
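+
+//
+// Illustrative sketch (not part of this header): building a 2 MB page entry
+// with these unions; field values follow the bit definitions above, and
+// PhysicalAddress is a placeholder for a 2 MB-aligned physical address.
+//
+//   x64_PAGE_TABLE_ENTRY  Entry;
+//
+//   Entry.Page2Mb.Uint64                    = 0;
+//   Entry.Page2Mb.Bits.Present              = 1;
+//   Entry.Page2Mb.Bits.ReadWrite            = 1;
+//   Entry.Page2Mb.Bits.MustBe1              = 1;    // page-size bit marks a 2 MB leaf
+//   Entry.Page2Mb.Bits.PageTableBaseAddress = PhysicalAddress >> 21;
+//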
+
+#pragma pack()
+
+#endif
+
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/cpu.S b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/cpu.S new file mode 100644 index 0000000000..0812f8ea2b --- /dev/null +++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/cpu.S @@ -0,0 +1,1658 @@ +## @file
+# Assembly code for the x64 resources.
+#
+# Copyright (c) 1999 - 2016, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+//=============================================================================
+// Debug Macros
+//=============================================================================
+
+//ENABLE_DEADLOOP = Deadloop on exception handler entry, exit
+
+// #define ENABLE_DEADLOOP 1
+#ifdef ENABLE_DEADLOOP
+ .macro DEADLOOP
+ jmp .
+ .endm
+#else
+ .macro DEADLOOP
+ .endm
+#endif
+
+ .text
+ ASM_FUNCTION_REMOVE_IF_UNREFERENCED
+
+//.globl _CpuFlushTlb
+.globl __outp
+.globl ASM_PFX(mErrorCodeFlag)
+
+//
+// Float control word initial value:
+// all exceptions masked, double-extended-precision, round-to-nearest
+//
+mFpuControlWord:
+.word 0x37F
+
+//
+// Multimedia-extensions control word:
+// all exceptions masked, round-to-nearest, flush to zero for masked underflow
+//
+mMmxControlWord:
+.long 0x1F80
+
+//
+//
+//
+//------------------------------------------------------------------------------
+// Generic IDT Vector Handlers for the Host. They are all the same so they
+// will compress really well.
+//
+// By knowing the return address for Vector 00 you can calculate the
+// vector number by looking at the call CommonInterruptEntry return address.
+// (return address - (AsmIdtVector00 + 5))/8 == IDT index
+//
+//------------------------------------------------------------------------------
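+//
+// Worked example: each stub below is 8 bytes (5-byte call + 2-byte .short + nop),
+// so the stub for vector N starts at AsmIdtVector00 + 8*N and its call returns to
+// AsmIdtVector00 + 8*N + 5. For N = 2: (AsmIdtVector00 + 21) - (AsmIdtVector00 + 5) = 16,
+// and 16 / 8 = 2. The .short at the return address also stores N directly, which is
+// what CommonInterruptEntry reads with movzwl.
+//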
+
+ .p2align 3
+
+.globl ASM_PFX(AsmIdtVector00)
+ASM_PFX(AsmIdtVector00):
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector00, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector00 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector01, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector01 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector02, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector02 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector03, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector03 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector04, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector04 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector05, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector05 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector06, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector06 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector07, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector07 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector08, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector08 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector09, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector09 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector0a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector0a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector0b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector0b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector0c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector0c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector0d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector0d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector0e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector0e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector0f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector0f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector10, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector10 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector11, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector11 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector12, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector12 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector13, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector13 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector14, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector14 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector15, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector15 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector16, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector16 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector17, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector17 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector18, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector18 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector19, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector19 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector1a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector1a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector1b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector1b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector1c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector1c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector1d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector1d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector1e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector1e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector1f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector1f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector20, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector20 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector21, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector21 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector22, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector22 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector23, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector23 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector24, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector24 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector25, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector25 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector26, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector26 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector27, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector27 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector28, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector28 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector29, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector29 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector2a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector2a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector2b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector2b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector2c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector2c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector2d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector2d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector2e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector2e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector2f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector2f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector30, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector30 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector31, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector31 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector32, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector32 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector33, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector33 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector34, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector34 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector35, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector35 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector36, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector36 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector37, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector37 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector38, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector38 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector39, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector39 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector3a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector3a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector3b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector3b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector3c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector3c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector3d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector3d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector3e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector3e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector3f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector3f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector40, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector40 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector41, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector41 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector42, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector42 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector43, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector43 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector44, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector44 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector45, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector45 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector46, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector46 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector47, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector47 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector48, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector48 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector49, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector49 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector4a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector4a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector4b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector4b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector4c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector4c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector4d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector4d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector4e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector4e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector4f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector4f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector50, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector50 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector51, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector51 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector52, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector52 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector53, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector53 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector54, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector54 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector55, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector55 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector56, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector56 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector57, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector57 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector58, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector58 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector59, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector59 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector5a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector5a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector5b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector5b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector5c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector5c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector5d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector5d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector5e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector5e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector5f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector5f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector60, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector60 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector61, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector61 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector62, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector62 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector63, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector63 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector64, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector64 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector65, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector65 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector66, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector66 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector67, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector67 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector68, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector68 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector69, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector69 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector6a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector6a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector6b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector6b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector6c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector6c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector6d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector6d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector6e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector6e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector6f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector6f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector70, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector70 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector71, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector71 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector72, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector72 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector73, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector73 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector74, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector74 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector75, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector75 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector76, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector76 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector77, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector77 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector78, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector78 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector79, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector79 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector7a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector7a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector7b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector7b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector7c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector7c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector7d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector7d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector7e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector7e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector7f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector7f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector80, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector80 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector81, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector81 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector82, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector82 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector83, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector83 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector84, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector84 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector85, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector85 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector86, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector86 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector87, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector87 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector88, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector88 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector89, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector89 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector8a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector8a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector8b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector8b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector8c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector8c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector8d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector8d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector8e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector8e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector8f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector8f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector90, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector90 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector91, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector91 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector92, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector92 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector93, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector93 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector94, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector94 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector95, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector95 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector96, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector96 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector97, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector97 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector98, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector98 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector99, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector99 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector9a, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector9a / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector9b, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector9b / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector9c, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector9c / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector9d, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector9d / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector9e, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector9e / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vector9f, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vector9f / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora0, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora0 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora1, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora1 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora2, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora2 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora3, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora3 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora4, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora4 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora5, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora5 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora6, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora6 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora7, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora7 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora8, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora8 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectora9, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectora9 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectoraa, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectoraa / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorab, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorab / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorac, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorac / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorad, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorad / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorae, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorae / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectoraf, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectoraf / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb0, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb0 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb1, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb1 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb2, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb2 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb3, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb3 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb4, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb4 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb5, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb5 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb6, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb6 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb7, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb7 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb8, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb8 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorb9, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorb9 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorba, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorba / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorbb, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorbb / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorbc, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorbc / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorbd, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorbd / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorbe, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorbe / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorbf, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorbf / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc0, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc0 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc1, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc1 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc2, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc2 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc3, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc3 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc4, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc4 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc5, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc5 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc6, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc6 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc7, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc7 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc8, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc8 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorc9, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorc9 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorca, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorca / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorcb, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorcb / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorcc, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorcc / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorcd, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorcd / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorce, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorce / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorcf, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorcf / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord0, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord0 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord1, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord1 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord2, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord2 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord3, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord3 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord4, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord4 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord5, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord5 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord6, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord6 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord7, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord7 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord8, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord8 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectord9, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectord9 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorda, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorda / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectordb, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectordb / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectordc, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectordc / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectordd, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectordd / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorde, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorde / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectordf, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectordf / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore0, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore0 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore1, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore1 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore2, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore2 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore3, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore3 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore4, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore4 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore5, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore5 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore6, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore6 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore7, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore7 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore8, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore8 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectore9, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectore9 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorea, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorea / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectoreb, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectoreb / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorec, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorec / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectored, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectored / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectoree, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectoree / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectoref, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectoref / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf0, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf0 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf1, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf1 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf2, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf2 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf3, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf3 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf4, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf4 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf5, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf5 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf6, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf6 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf7, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf7 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf8, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf8 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorf9, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorf9 / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorfa, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorfa / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorfb, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorfb / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorfc, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorfc / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorfd, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorfd / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorfe, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorfe / 8
+ nop
+
+ callq ASM_PFX(CommonInterruptEntry)
+ .set vectorff, . - ASM_PFX(AsmIdtVector00) - 5
+ .short vectorff / 8
+ nop
+
+//---------------------------------------//
+// CommonInterruptEntry //
+//---------------------------------------//
+// The following algorithm is used for the common interrupt routine.
+
+// +---------------------+ <-- 16-byte aligned ensured by processor
+// 40 + Old SS +
+// +---------------------+
+// 32 + Old RSP +
+// +---------------------+
+// 24 + RFlags +
+// +---------------------+
+// 16 + CS +
+// +---------------------+
+// 8 + RIP +
+// +---------------------+
+// 0 + RBP + <-- RBP, 16-byte aligned
+// +---------------------+
+// -8 + RCX / Vector Number +
+// +---------------------+
+//-16 + Error Code +
+// +---------------------+
+//
+
+.globl ASM_PFX(CommonInterruptEntry)
+ASM_PFX(CommonInterruptEntry):
+
+ DEADLOOP
+
+ cli
+ //
+ // All interrupt handlers are invoked through interrupt gates, so
+  // the IF flag is automatically cleared at the entry point
+ //
+ //
+ // Calculate vector number
+ //
+  xchg    (%rsp), %rcx              // get the return address of the call;
+  movzwl  (%rcx), %ecx              // it is the address where the vector number is stored
+ cmp $32, %ecx // Intel reserved vector for exceptions?
+ jae L_NoErrorCode
+ bt %ecx, ASM_PFX(mErrorCodeFlag)(%rip)
+ jc L_ErrorCode
+
+L_NoErrorCode:
+ pushq (%rsp) // Copy RCX/Vector to next stack slot
+ mov %rbp, 8(%rsp) // Save RBP
+ push $0 // Push dummy error code
+ jmp L_SaveState
+
+L_ErrorCode:
+ xchg 8(%rsp), %rbp // Put RBP into Error Code slot
+ push %rbp // Push Error Code to stack
+
+ //
+ // Since here the stack pointer is 16-byte aligned, so
+ // EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
+ // is 16-byte aligned
+ //
+
+//// UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax//
+//// UINT64 R8, R9, R10, R11, R12, R13, R14, R15//
+L_SaveState:
+ mov %rsp, %rbp
+ add $16, %rbp // Fixup RBP after RCX/Vector, Error Code swizzle
+ push %r15
+ push %r14
+ push %r13
+ push %r12
+ push %r11
+ push %r10
+ push %r9
+ push %r8
+ push %rax
+ push -8(%rbp) // RCX
+ push %rdx
+ push %rbx
+ pushq 32(%rbp) // RSP
+ pushq 0(%rbp) // RBP
+ push %rsi
+ push %rdi
+
+//// UINT64 Gs, Fs, Es, Ds, Cs, Ss// ensure the high 16 bits of each are zero
+ movzwq 40(%rbp), %rax
+ push %rax // for ss
+ movzwq 16(%rbp), %rax
+ push %rax // for cs
+ mov %ds, %eax
+ push %rax
+ mov %es, %eax
+ push %rax
+ mov %fs, %eax
+ push %rax
+ mov %gs, %eax
+ push %rax
+
+ mov %rcx, -8(%rbp) // save vector number
+
+//// UINT64 Rip
+ pushq 8(%rbp)
+
+//// UINT64 Gdtr[2], Idtr[2]//
+ sub $16, %rsp // IDT base / limit
+ sidt 6(%rsp)
+ movzwq 6(%rsp), %rax
+ mov %rax, (%rsp)
+
+ sub $16, %rsp // GDT base / limit
+ sgdt 6(%rsp)
+ movzwq 6(%rsp), %rax
+ mov %rax, (%rsp)
+
+//// UINT64 Ldtr, Tr//
+ xor %rax, %rax
+ str %ax
+ push %rax
+ sldt %ax
+ push %rax
+
+//// UINT64 RFlags//
+ pushq 24(%rbp)
+
+//// UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8//
+ mov %cr8, %rax
+ push %rax
+ mov %cr4, %rax
+ or $0x208, %rax
+ mov %rax, %cr4
+ push %rax
+ mov %cr3, %rax
+ push %rax
+ mov %cr2, %rax
+ push %rax
+ xor %rax, %rax
+ push %rax
+ mov %cr0, %rax
+ push %rax
+
+//// UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7//
+ mov %db7, %rax
+ push %rax
+ mov %db6, %rax
+ push %rax
+ mov %db3, %rax
+ push %rax
+ mov %db2, %rax
+ push %rax
+ mov %db1, %rax
+ push %rax
+ mov %db0, %rax
+ push %rax
+
+//// FX_SAVE_STATE_X64 FxSaveState//
+
+ sub $512, %rsp
+ mov %rsp, %rdi
+ .byte 0x0f, 0xae, 0x07 //fxsave [rdi]
+
+//// UINT32 ExceptionData//
+ pushq -16(%rbp)
+
+//// call into exception handler
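+//// (the handler is called with RCX = vector number and RDX = a pointer to the
+////  context record built above; see the parameter setup below)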
+ mov -8(%rbp), %rcx
+ mov ExternalVectorTablePtr(%rip), %rax // get the interrupt vectors base
+ mov (%rax,%rcx,8),%rax
+ or %rax, %rax // NULL?
+
+ je L_nonNullValue
+
+//// Prepare parameter and call
+ mov %rsp, %rdx
+
+ //
+ // Per X64 calling convention, allocate maximum parameter stack space
+ // and make sure RSP is 16-byte aligned
+ //
+ DEADLOOP
+
+ sub $0x28, %rsp
+ callq *%rax
+ add $0x28, %rsp
+
+ DEADLOOP
+
+L_nonNullValue:
+ cli
+//// UINT64 ExceptionData
+ pop %rax
+ mov %rax, -16(%rbp)
+
+//// FX_SAVE_STATE_X64 FxSaveState//
+
+ mov %rsp, %rsi
+ .byte 0x0f, 0xae, 0x0e // fxrstor [rsi]
+ add $512, %rsp
+
+//// UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7//
+ add $48, %rsp
+ xor %rax, %rax //// always clear dr6
+ mov %rax, %dr6
+
+//// UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8//
+ pop %rax
+ mov %rax, %cr0
+ add $8, %rsp // not for Cr1
+ pop %rax
+ mov %rax, %cr2
+ pop %rax
+ mov %rax, %cr3
+ pop %rax
+ mov %rax, %cr4
+ pop %rax
+ mov %rax, %cr8
+
+//// UINT64 RFlags
+ popq 24(%rbp)
+
+//// UINT64 Ldtr, Tr//
+//// UINT64 Gdtr[2], Idtr[2]//
+//// Best not let anyone mess with these particular registers...
+ add $48, %rsp
+
+//// UINT64 Rip
+ popq 8(%rbp)
+
+//// UINT64 Gs, Fs, Es, Ds, Cs, Ss//
+ pop %rax
+ // mov gs, rax // not for gs
+ pop %rax
+ // mov fs, rax // not for fs
+ // (X64 will not use fs and gs, so we do not restore it)
+ pop %rax
+ mov %eax, %es
+ pop %rax
+ mov %eax, %ds
+ popq 16(%rbp) // for cs
+ popq 40(%rbp) // for ss
+
+//// UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax//
+//// UINT64 R8, R9, R10, R11, R12, R13, R14, R15//
+ pop %rdi
+ pop %rsi
+ add $8, %rsp // not for rbp
+ popq 32(%rbp) // for rsp
+ pop %rbx
+ pop %rdx
+ pop %rcx
+ pop %rax
+ pop %r8
+ pop %r9
+ pop %r10
+ pop %r11
+ pop %r12
+ pop %r13
+ pop %r14
+ pop %r15
+
+ mov %rbp, %rsp
+ pop %rbp
+
+ DEADLOOP
+
+ iretq
+
+//
+//Initializes the floating point units as required by the UEFI specification.
+//
+// This function initializes the floating-point control word to 0x037F (all exceptions
+// masked, double-extended-precision, round-to-nearest) and the multimedia-extensions control word
+// (if supported) to 0x1F80 (all exceptions masked, round-to-nearest, flush to zero
+// for masked underflow).
+//
+.globl ASM_PFX(CpuInitFloatPointUnit)
+ASM_PFX(CpuInitFloatPointUnit):
+
+ //
+ // Initialize floating point units
+ //
+  // The following opcodes encode the 'finit' instruction; the raw bytes are
+  // used because some 64-bit assemblers do not accept the mnemonic
+ //
+
+ //finit
+ .byte 0x9B, 0xDB, 0xE3
+ fldcw mFpuControlWord(%rip)
+
+ //
+ // Set OSFXSR bit 9 in CR4
+ //
+ mov %cr4,%rax
+ or $0x200,%rax
+ mov %rax,%cr4
+
+ ldmxcsr mMmxControlWord(%rip)
+ retq
+
+.globl ASM_PFX(InitializeExternalVectorTablePtr)
+ASM_PFX(InitializeExternalVectorTablePtr):
+ mov %rcx, ExternalVectorTablePtr(%rip)
+ retq
+
+ExternalVectorTablePtr:
+.quad 0 // point to the external interrupt vector table
+
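+//
+// __outp helper (MSVC-style, inferred from the instructions below):
+// RCX = I/O port, RDX = value; writes the low byte of the value to the port.
+//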
+__outp:
+ mov %edx,%eax
+ movzwl %cx,%edx
+ out %al,(%dx)
+ retq
diff --git a/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/cpu.asm b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/cpu.asm new file mode 100644 index 0000000000..03837c5670 --- /dev/null +++ b/Silicon/BroxtonSoC/BroxtonSiPkg/Cpu/CpuInit/Dxe/x64/cpu.asm @@ -0,0 +1,413 @@ +;; @file
+; Assembly code for the x64 resources.
+;
+; Copyright (c) 2005 - 2016, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+;
+;;
+
+text SEGMENT
+
+EXTRN mErrorCodeFlag:DWORD ; Error code flags for exceptions
+
+ExternalVectorTablePtr QWORD 0 ; point to the external interrupt vector table
+
+;
+; Float control word initial value:
+; all exceptions masked, double-extended-precision, round-to-nearest
+;
+mFpuControlWord DW 037Fh
+;
+; Multimedia-extensions control word:
+; all exceptions masked, round-to-nearest, flush to zero for masked underflow
+;
+mMmxControlWord DD 01F80h
+
+
+InitializeExternalVectorTablePtr PROC PUBLIC
+ mov ExternalVectorTablePtr, rcx
+ ret
+InitializeExternalVectorTablePtr ENDP
+;
+;
+;
+;------------------------------------------------------------------------------
+; Generic IDT Vector Handlers for the Host. They are all the same so they
+; will compress really well.
+;
+; By knowing the return address for Vector 00 you can calculate the
+; vector number by looking at the call CommonInterruptEntry return address.
+; (return address - (AsmIdtVector00 + 5))/8 == IDT index
+;
+;------------------------------------------------------------------------------
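+;
+; Worked example: each stub emitted by the REPEAT block below is 8 bytes
+; (5-byte call + 2-byte dw + nop), so for vector N the call returns to
+; AsmIdtVector00 + 8*N + 5 and (return address - (AsmIdtVector00 + 5)) / 8 = N.
+; The dw at the return address holds the same vector number, which
+; CommonInterruptEntry reads with movzx.
+;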
+
+ALIGN 8
+
+PUBLIC AsmIdtVector00
+
+AsmIdtVector00 LABEL BYTE
+REPEAT 256
+ call CommonInterruptEntry
+ dw ( $ - AsmIdtVector00 - 5 ) / 8 ; vector number
+ nop
+ENDM
+
+
+;---------------------------------------;
+; CommonInterruptEntry ;
+;---------------------------------------;
+; The following algorithm is used for the common interrupt routine.
+
+;
+; +---------------------+ <-- 16-byte aligned ensured by processor
+; + Old SS +
+; +---------------------+
+; + Old RSP +
+; +---------------------+
+; + RFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + RIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + RCX / Vector Number +
+; +---------------------+
+; + RBP +
+; +---------------------+ <-- RBP, 16-byte aligned
+;
+
+CommonInterruptEntry PROC PUBLIC
+ cli
+ ;
+  ; All interrupt handlers are invoked through interrupt gates, so the
+  ; IF flag is automatically cleared at the entry point
+ ;
+ ;
+ ; Calculate vector number
+ ;
+  xchg rcx, [rsp] ; get the return address of the call, which is the address of the vector number
+ movzx ecx, word ptr [rcx]
+ cmp ecx, 32 ; Intel reserved vector for exceptions?
+ jae NoErrorCode
+ bt mErrorCodeFlag, ecx
+ jc @F
+
+NoErrorCode:
+ ;
+ ; Push a dummy error code on the stack
+ ; to maintain coherent stack map
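+  ;
+  ; The CPU did not push an error code for this vector. [rsp] currently holds
+  ; the saved RCX (from the xchg above); duplicating it and writing 0 into the
+  ; old slot yields, from low to high: saved RCX, dummy error code (0), RIP,
+  ; CS, RFlags, RSP, SS -- the same layout as when the CPU pushes an error code.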
+ ;
+ push [rsp]
+ mov qword ptr [rsp + 8], 0
+@@:
+ push rbp
+ mov rbp, rsp
+
+ ;
+  ; The stack pointer is 16-byte aligned here, so the
+  ; EFI_FX_SAVE_STATE_X64 member of EFI_SYSTEM_CONTEXT_X64
+  ; is also 16-byte aligned
+ ;
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ push r15
+ push r14
+ push r13
+ push r12
+ push r11
+ push r10
+ push r9
+ push r8
+ push rax
+ push qword ptr [rbp + 8] ; RCX
+ push rdx
+ push rbx
+ push qword ptr [rbp + 48] ; RSP
+ push qword ptr [rbp] ; RBP
+ push rsi
+ push rdi
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss; zero-extend each 16-bit selector so the upper bits are zero
+ movzx rax, word ptr [rbp + 56]
+ push rax ; for ss
+ movzx rax, word ptr [rbp + 32]
+ push rax ; for cs
+ mov rax, ds
+ push rax
+ mov rax, es
+ push rax
+ mov rax, fs
+ push rax
+ mov rax, gs
+ push rax
+
+ mov [rbp + 8], rcx ; save vector number
+
+;; UINT64 Rip;
+ push qword ptr [rbp + 24]
+
+;; UINT64 Gdtr[2], Idtr[2];
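+  ; sgdt and sidt each store a 10-byte pseudo-descriptor; the two 16-byte
+  ; slots reserved below match the UINT64 Gdtr[2] / Idtr[2] fields above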
+ sub rsp, 16
+ sidt fword ptr [rsp]
+ sub rsp, 16
+ sgdt fword ptr [rsp]
+
+;; UINT64 Ldtr, Tr;
+ xor rax, rax
+ str ax
+ push rax
+ sldt ax
+ push rax
+
+;; UINT64 RFlags;
+ push qword ptr [rbp + 40]
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ mov rax, cr8
+ push rax
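+  ; set CR4.DE (bit 3) and CR4.OSFXSR (bit 9); OSFXSR ensures the fxsave/fxrstor
+  ; used below also save and restore the XMM/MXCSR state. Note that the value
+  ; pushed for Cr4 is this modified value.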
+ mov rax, cr4
+ or rax, 208h
+ mov cr4, rax
+ push rax
+ mov rax, cr3
+ push rax
+ mov rax, cr2
+ push rax
+ xor rax, rax
+ push rax
+ mov rax, cr0
+ push rax
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov rax, dr7
+ push rax
+;; clear Dr7 while the debugger itself is executing
+ xor rax, rax
+ mov dr7, rax
+
+ mov rax, dr6
+ push rax
+;; ensure all status bits in dr6 are clear...
+ xor rax, rax
+ mov dr6, rax
+
+ mov rax, dr3
+ push rax
+ mov rax, dr2
+ push rax
+ mov rax, dr1
+ push rax
+ mov rax, dr0
+ push rax
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
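+  ; fxsave stores a 512-byte area that must be 16-byte aligned;
+  ; RSP is still 16-byte aligned at this point (see the note above)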
+ sub rsp, 512
+ mov rdi, rsp
+ db 0fh, 0aeh, 00000111y ;fxsave [rdi]
+
+;; UINT32 ExceptionData;
+ push qword ptr [rbp + 16]
+
+;; call into exception handler
+ mov rcx, [rbp + 8]
+ mov rax, ExternalVectorTablePtr ; get the interrupt vectors base
+ mov rax, [rax + rcx * 8]
+ or rax, rax ; NULL?
+
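+  ; no handler is registered for this vector, so skip the dispatch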
+  je nonNullValue
+
+;; Prepare parameter and call
+; mov rcx, [rbp + 8]
+ mov rdx, rsp
+ ;
+ ; Per X64 calling convention, allocate maximum parameter stack space
+ ; and make sure RSP is 16-byte aligned
+ ;
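+  ; 4 * 8 bytes of shadow space for the four register parameters, plus 8 bytes
+  ; so that RSP is 16-byte aligned at the call (the ExceptionData push above
+  ; left it 8 bytes off)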
+ sub rsp, 4 * 8 + 8
+ call rax
+ add rsp, 4 * 8 + 8
+
+nonNullValue:
+ cli
+;; UINT64 ExceptionData;
+ add rsp, 8
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ mov rsi, rsp
+ db 0fh, 0aeh, 00001110y ; fxrstor [rsi]
+ add rsp, 512
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ pop rax
+ mov dr0, rax
+ pop rax
+ mov dr1, rax
+ pop rax
+ mov dr2, rax
+ pop rax
+ mov dr3, rax
+;; skip restore of dr6. We cleared dr6 during the context save.
+ add rsp, 8
+ pop rax
+ mov dr7, rax
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ pop rax
+ mov cr0, rax
+ add rsp, 8 ; not for Cr1
+ pop rax
+ mov cr2, rax
+ pop rax
+ mov cr3, rax
+ pop rax
+ mov cr4, rax
+ pop rax
+ mov cr8, rax
+
+;; UINT64 RFlags;
+ pop qword ptr [rbp + 40]
+
+;; UINT64 Ldtr, Tr;
+;; UINT64 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add rsp, 48
+
+;; UINT64 Rip;
+ pop qword ptr [rbp + 24]
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
+ pop rax
+ ; mov gs, rax ; not for gs
+ pop rax
+ ; mov fs, rax ; not for fs
+  ; (X64 will not use fs and gs, so we do not restore them)
+ pop rax
+ mov es, rax
+ pop rax
+ mov ds, rax
+ pop qword ptr [rbp + 32] ; for cs
+ pop qword ptr [rbp + 56] ; for ss
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ pop rdi
+ pop rsi
+ add rsp, 8 ; not for rbp
+ pop qword ptr [rbp + 48] ; for rsp
+ pop rbx
+ pop rdx
+ pop rcx
+ pop rax
+ pop r8
+ pop r9
+ pop r10
+ pop r11
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+ mov rsp, rbp
+ pop rbp
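+  ; skip the vector number and error code slots so that iretq sees
+  ; RIP, CS, RFlags, RSP and SS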
+ add rsp, 16
+ iretq
+
+CommonInterruptEntry ENDP
+
+
+LongMode PROC PUBLIC
+
+in_long_mode::
+ ;
+ ; Debug Stop
+ ;
+ jmp in_long_mode
+
+ ;
+  ; We're in long mode, so marshal the arguments to call the
+  ; passed-in function pointers
+  ; Recall
+  ; [rbp+10h] = HobStart
+  ; [rbp+18h] = Stack
+  ; [rbp+20h] = PpisNeededByDxeIplEntryPoint <--- Call this first (for each call, pass the HOB pointer)
+  ; [rbp+28h] = DxeCoreEntryPoint <--- Call this second
+ ;
+ mov rbx, [rbp+18h] ; Setup the stack
+ mov rsp, rbx ; On a new stack now
+
+ mov rcx, [rbp+10h] ; Pass Hob Start in RCX
+  mov rax, [rbp+20h] ; Get the function pointer for
+                     ; PpisNeededByDxeIplEntryPoint into RAX
+  call rax           ; Make the call into PpisNeededByDxeIplEntryPoint
+
+  mov rcx, [rbp+10h] ; Pass Hob Start in RCX
+  mov rax, [rbp+28h] ; Get the function pointer for
+                     ; DxeCoreEntryPoint into RAX
+  call rax           ; Make the call into Dxe Core
+
+ call CommonInterruptEntry
+
+ mov rdi, CommonInterruptEntry
+
+ lgdt fword ptr [rdi]
+
+ lidt fword ptr [rdi]
+
+ call near ptr [rax] ; Make the call into PpisNeededByDxeIplEntryPoint
+
+ call rax
+
+  ;
+  ; Should never get here.
+  ;
+no_long_mode:
+  jmp no_long_mode
+LongMode ENDP
+
+;
+; Initializes the floating point unit as required by the UEFI specification.
+;
+; This function initializes the floating-point control word to 0x037F (all exceptions
+; masked, double-extended-precision, round-to-nearest) and the multimedia-extensions
+; control word (if supported) to 0x1F80 (all exceptions masked, round-to-nearest,
+; flush to zero for masked underflow).
+;
+CpuInitFloatPointUnit PROC PUBLIC
+ ;
+ ; Initialize floating point units
+ ;
+  ; The following bytes encode the 'finit' instruction; they are emitted
+  ; directly because some 64-bit assemblers do not support the mnemonic
+ ;
+ DB 9Bh, 0DBh, 0E3h
+ fldcw mFpuControlWord
+
+ ;
+ ; Set OSFXSR bit 9 in CR4
+ ;
+ mov rax, cr4
+ or rax, 200h
+ mov cr4, rax
+
+ ldmxcsr mMmxControlWord
+ ret
+CpuInitFloatPointUnit ENDP
+
+text ENDS
+END
+
|