author    Guo Mang <mang.guo@intel.com>  2016-08-03 11:46:45 +0800
committer Guo Mang <mang.guo@intel.com>  2016-08-04 10:33:10 +0800
commit    9dfd62064d1d1a6344165febb44c7b0d0f3a6a1e (patch)
tree      e6734a38a607f96255aff50d13b92809779ff737 /BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm
parent    177e76bd8e9a863b4bd06a95f0e7cb5e4851812f (diff)
download  edk2-platforms-9dfd62064d1d1a6344165febb44c7b0d0f3a6a1e.tar.xz
BraswellPlatformPkg: Move IntelSiliconBasic to Common/Silicon/IntelSiliconBasic
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Guo Mang <mang.guo@intel.com>
Reviewed-by: David Wei <david.wei@intel.com>
Diffstat (limited to 'BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm')
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuS3.c  435
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.c  484
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.h  198
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.S  159
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm  163
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/PageTbl.c  96
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/Semaphore.c  62
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.S  153
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm  156
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.S  1172
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.asm  858
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.S  94
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.asm  104
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c  66
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h  98
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/MpService.c  1759
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c  1596
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h  780
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf  195
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.c  492
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.h  220
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.c  1369
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.h  76
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfileInternal.h  190
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SyncTimer.c  111
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.S  198
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.asm  201
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/PageTbl.c  647
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/Semaphore.c  104
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.S  236
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.asm  232
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.S  822
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.asm  504
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.S  123
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.asm  140
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.c  302
-rw-r--r--  BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.h  106
37 files changed, 14701 insertions, 0 deletions
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuS3.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuS3.c
new file mode 100644
index 0000000000..6f577ed183
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuS3.c
@@ -0,0 +1,435 @@
+/** @file
+ Code for Processor S3 restoration
+
+ Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+typedef struct {
+ UINTN Lock;
+ VOID *StackStart;
+ UINTN StackSize;
+ VOID *ApFunction;
+ IA32_DESCRIPTOR GdtrProfile;
+ IA32_DESCRIPTOR IdtrProfile;
+ UINT32 BufferStart;
+ UINT32 Cr3;
+} MP_CPU_EXCHANGE_INFO;
+
+typedef struct {
+ UINT8 *RendezvousFunnelAddress;
+ UINTN PModeEntryOffset;
+ UINTN FlatJumpOffset;
+ UINTN Size;
+ UINTN LModeEntryOffset;
+ UINTN LongJumpOffset;
+} MP_ASSEMBLY_ADDRESS_MAP;
+
+/**
+ Get starting address and size of the rendezvous entry for APs.
+ Information for fixing a jump instruction in the code is also returned.
+
+ @param AddressMap Output buffer for address map information.
+
+**/
+VOID *
+EFIAPI
+AsmGetAddressMap (
+ MP_ASSEMBLY_ADDRESS_MAP *AddressMap
+ );
+
+#define LEGACY_REGION_SIZE (2 * 0x1000)
+#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
+
+ACPI_CPU_DATA mAcpiCpuData;
+UINT32 mNumberToFinish;
+MP_CPU_EXCHANGE_INFO *mExchangeInfo;
+BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
+VOID *mGdtForAp = NULL;
+VOID *mIdtForAp = NULL;
+VOID *mMachineCheckHandlerForAp = NULL;
+SPIN_LOCK mMsrLock;
+
+/**
+ Synch up the MTRR values for all processors.
+
+ @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
+
+ @retval None
+
+**/
+VOID
+EFIAPI
+LoadMtrrData (
+ EFI_PHYSICAL_ADDRESS MtrrTable
+ )
+{
+ MTRR_SETTINGS *MtrrSettings;
+
+ MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
+ MtrrSetAllMtrrs (MtrrSettings);
+}
+
+/**
+ Programs registers for the calling processor.
+ This function programs registers for the calling processor.
+
+ @param RegisterTable Pointer to register table of the running processor.
+
+**/
+VOID
+SetProcessorRegister (
+ CPU_REGISTER_TABLE *RegisterTable
+ )
+{
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
+ UINTN Index;
+ UINTN Value;
+
+ //
+ // Traverse Register Table of this logical processor
+ //
+ RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
+ for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
+ //
+ // Check the type of specified register
+ //
+ switch (RegisterTableEntry->RegisterType) {
+ //
+ // The specified register is Control Register
+ //
+ case ControlRegister:
+ switch (RegisterTableEntry->Index) {
+ case 0:
+ Value = AsmReadCr0 ();
+ Value = (UINTN) BitFieldWrite64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ (UINTN) RegisterTableEntry->Value
+ );
+ AsmWriteCr0 (Value);
+ break;
+ case 2:
+ Value = AsmReadCr2 ();
+ Value = (UINTN) BitFieldWrite64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ (UINTN) RegisterTableEntry->Value
+ );
+ AsmWriteCr2 (Value);
+ break;
+ case 3:
+ Value = AsmReadCr3 ();
+ Value = (UINTN) BitFieldWrite64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ (UINTN) RegisterTableEntry->Value
+ );
+ AsmWriteCr3 (Value);
+ break;
+ case 4:
+ Value = AsmReadCr4 ();
+ Value = (UINTN) BitFieldWrite64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ (UINTN) RegisterTableEntry->Value
+ );
+ AsmWriteCr4 (Value);
+ break;
+ default:
+ break;
+ }
+ break;
+ //
+ // The specified register is Model Specific Register
+ //
+ case Msr:
+ //
+ // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
+ //
+ AcquireSpinLock (&mMsrLock);
+ //
+ // If this function is called to restore register setting after INIT signal,
+ // there is no need to restore MSRs in register table.
+ //
+ if (RegisterTableEntry->ValidBitLength >= 64) {
+ //
+ // If length is not less than 64 bits, then directly write without reading
+ //
+ AsmWriteMsr64 (
+ RegisterTableEntry->Index,
+ RegisterTableEntry->Value
+ );
+ } else {
+ //
+ // Set the bit section according to bit start and length
+ //
+ AsmMsrBitFieldWrite64 (
+ RegisterTableEntry->Index,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ RegisterTableEntry->Value
+ );
+ }
+ ReleaseSpinLock (&mMsrLock);
+ break;
+ //
+ // Enable or disable cache
+ //
+ case CacheControl:
+ //
+ // If value of the entry is 0, then disable cache. Otherwise, enable cache.
+ //
+ if (RegisterTableEntry->Value == 0) {
+ AsmDisableCache ();
+ } else {
+ AsmEnableCache ();
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ AP initialization before SMBASE relocation in the S3 boot path.
+**/
+VOID
+EarlyMPRendezvousProcedure (
+ VOID
+ )
+{
+ CPU_REGISTER_TABLE *RegisterTableList;
+ UINT32 InitApicId;
+ UINTN Index;
+
+ LoadMtrrData (mAcpiCpuData.MtrrTable);
+
+ //
+ // Find processor number for this CPU.
+ //
+ RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
+ InitApicId = GetInitialApicId ();
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+ if (RegisterTableList[Index].InitialApicId == InitApicId) {
+ SetProcessorRegister (&RegisterTableList[Index]);
+ break;
+ }
+ }
+
+ //
+ // Count down the number with lock mechanism.
+ //
+ InterlockedDecrement (&mNumberToFinish);
+}
+
+/**
+ AP initialization after SMBASE relocation in the S3 boot path.
+**/
+VOID
+MPRendezvousProcedure (
+ VOID
+ )
+{
+ CPU_REGISTER_TABLE *RegisterTableList;
+ UINT32 InitApicId;
+ UINTN Index;
+
+ ProgramVirtualWireMode ();
+ DisableLvtInterrupts ();
+
+ RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
+ InitApicId = GetInitialApicId ();
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+ if (RegisterTableList[Index].InitialApicId == InitApicId) {
+ SetProcessorRegister (&RegisterTableList[Index]);
+ break;
+ }
+ }
+
+ //
+ // Count down the number with lock mechanism.
+ //
+ InterlockedDecrement (&mNumberToFinish);
+}
+
+/**
+ Prepares startup vector for APs.
+ This function prepares startup vector for APs.
+
+ @param WorkingBuffer The address of the work buffer.
+
+**/
+VOID
+PrepareAPStartupVector (
+ EFI_PHYSICAL_ADDRESS WorkingBuffer
+ )
+{
+ EFI_PHYSICAL_ADDRESS StartupVector;
+ MP_ASSEMBLY_ADDRESS_MAP AddressMap;
+
+ //
+ // Get the address map of startup code for AP,
+ // including code size, and offset of long jump instructions to redirect.
+ //
+ ZeroMem (&AddressMap, sizeof (AddressMap));
+ AsmGetAddressMap (&AddressMap);
+
+ StartupVector = WorkingBuffer;
+
+ //
+ // Copy AP startup code to startup vector, and then redirect the long jump
+ // instructions for mode switching.
+ //
+ CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
+ *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
+ if (AddressMap.LongJumpOffset != 0) {
+ *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
+ }
+
+ //
+ // Get the start address of exchange data between BSP and AP.
+ //
+ mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
+ ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));
+
+ CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
+ CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+ //
+ // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
+ //
+ CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
+ CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
+ CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);
+
+ mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
+ mExchangeInfo->StackSize = mAcpiCpuData.StackSize;
+ mExchangeInfo->BufferStart = (UINT32) StartupVector;
+ mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());
+}
+
+/**
+  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.
+
+  The function is invoked before SMBASE relocation in the S3 path. It performs the first-time
+  microcode load and restores MTRRs for both BSP and APs.
+
+**/
+VOID
+EarlyInitializeCpu (
+ VOID
+ )
+{
+ CPU_REGISTER_TABLE *RegisterTableList;
+ UINT32 InitApicId;
+ UINTN Index;
+
+ //
+ // Initialize spin lock for MSR programming
+ //
+ InitializeSpinLock (&mMsrLock);
+
+ LoadMtrrData (mAcpiCpuData.MtrrTable);
+
+ //
+ // Find processor number for this CPU.
+ //
+ RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
+ InitApicId = GetInitialApicId ();
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+ if (RegisterTableList[Index].InitialApicId == InitApicId) {
+ SetProcessorRegister (&RegisterTableList[Index]);
+ break;
+ }
+ }
+
+ ProgramVirtualWireMode ();
+
+ PrepareAPStartupVector (mAcpiCpuData.StartupVector);
+
+ mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
+ mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;
+
+ //
+ // Send INIT IPI - SIPI to all APs
+ //
+ SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
+
+ while (mNumberToFinish > 0) {
+ CpuPause ();
+ }
+}
+
+/**
+  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.
+
+  The function is invoked after SMBASE relocation in the S3 path. It restores the configuration
+  according to data saved on the normal boot path, for both BSP and APs.
+
+**/
+VOID
+InitializeCpu (
+ VOID
+ )
+{
+ CPU_REGISTER_TABLE *RegisterTableList;
+ UINT32 InitApicId;
+ UINTN Index;
+
+ RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
+ InitApicId = GetInitialApicId ();
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+ if (RegisterTableList[Index].InitialApicId == InitApicId) {
+ SetProcessorRegister (&RegisterTableList[Index]);
+ break;
+ }
+ }
+
+ mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
+ mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;
+
+ //
+ // Send INIT IPI - SIPI to all APs
+ //
+ SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
+
+ while (mNumberToFinish > 0) {
+ CpuPause ();
+ }
+}
+
+/**
+  Restore SMM configuration for Haswell enhanced SMM features in the S3 boot path.
+**/
+VOID
+RestoreSmmConfigurationInS3 (
+ VOID
+ )
+{
+ if (mRestoreSmmConfigurationInS3) {
+ //
+ // Configure SMM Code Access Check feature if available.
+ //
+ ConfigSmmCodeAccessCheck ();
+
+ mRestoreSmmConfigurationInS3 = FALSE;
+ }
+}
+
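Taken together, the two entry points above define an ordering contract for the S3 resume path: EarlyInitializeCpu() runs before SMBASE relocation (MTRRs plus pre-SMM register tables), InitializeCpu() after it. A minimal sketch of that BSP-side sequence follows; SmmRelocateBases() is a placeholder name for the relocation step, which lives elsewhere in this driver:

#include "PiSmmCpuDxeSmm.h"

//
// Sketch only: the call ordering this file expects on S3 resume.
// EarlyInitializeCpu() and InitializeCpu() each block until every
// AP has decremented mNumberToFinish, so the BSP never races ahead.
//
VOID
S3ResumeCpuSketch (
  VOID
  )
{
  EarlyInitializeCpu ();   // pre-SMM register tables + MTRRs, all CPUs
  SmmRelocateBases ();     // placeholder: relocate SMBASE on every CPU
  InitializeCpu ();        // post-relocation register tables, all CPUs
}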
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.c
new file mode 100644
index 0000000000..1c75c1b67a
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.c
@@ -0,0 +1,484 @@
+/** @file
+ Implementation of SMM CPU Services Protocol.
+
+ Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+#define CPUID_EXTENDED_TOPOLOGY 0xb
+#define CPUID_CACHE_PARAMS 0x4
+#define CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_INVALID 0x0
+#define CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_SMT 0x1
+#define CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_CORE 0x2
+
+extern EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[];
+
+//
+// SMM CPU Service Protocol instance
+//
+EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService = {
+ SmmGetProcessorInfo,
+ SmmSwitchBsp,
+ SmmAddProcessor,
+ SmmRemoveProcessor,
+ SmmWhoAmI,
+ SmmRegisterExceptionHandler
+};
+
+/**
+ Get Package ID/Core ID/Thread ID of a processor.
+ APIC ID must be an initial APIC ID.
+ The algorithm below assumes the target system has symmetry across physical package boundaries
+  with respect to the number of logical processors per package and the number of cores per package.
+
+ @param[in] ApicId APIC ID of the target logical processor.
+ @param[out] Location Returns the processor location information.
+
+**/
+VOID
+SmmGetProcessorLocation (
+ IN UINT32 ApicId,
+ OUT EFI_CPU_PHYSICAL_LOCATION *Location
+ )
+{
+ UINTN ThreadBits;
+ UINTN CoreBits;
+ UINT32 RegEax;
+ UINT32 RegEbx;
+ UINT32 RegEcx;
+ UINT32 RegEdx;
+ UINT32 MaxCpuIdIndex;
+ UINT32 SubIndex;
+ UINTN LevelType;
+ UINT32 MaxLogicProcessorsPerPackage;
+ UINT32 MaxCoresPerPackage;
+
+ ASSERT (Location != NULL);
+
+ //
+ // Check if the processor is capable of supporting more than one logical processor.
+ //
+ AsmCpuid (EFI_CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
+ ASSERT ((RegEdx & BIT28) != 0);
+
+ //
+ // Assume three-level mapping of APIC ID: Package:Core:SMT.
+ //
+
+ //
+ // Get the max index of basic CPUID
+ //
+ AsmCpuid (EFI_CPUID_SIGNATURE, &MaxCpuIdIndex, NULL, NULL, NULL);
+
+ if (MaxCpuIdIndex >= CPUID_EXTENDED_TOPOLOGY) {
+ AsmCpuidEx (CPUID_EXTENDED_TOPOLOGY, 0, &RegEax, &RegEbx, NULL, NULL);
+ if ((RegEbx & 0xffff) != 0) {
+ //
+ // x2APIC ID
+ //
+
+ ThreadBits = RegEax & 0x1f;
+
+ CoreBits = 0;
+ SubIndex = 1;
+ do {
+ AsmCpuidEx (CPUID_EXTENDED_TOPOLOGY, SubIndex, &RegEax, NULL, &RegEcx, NULL);
+ LevelType = (RegEcx >> 8) & 0xff;
+ if (LevelType == CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_CORE) {
+ CoreBits = (RegEax & 0x1f) - ThreadBits;
+ break;
+ }
+ SubIndex++;
+ } while (LevelType != CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_INVALID);
+
+      Location->Thread  = ApicId & ~((-1) << ThreadBits);
+      Location->Core    = (ApicId >> ThreadBits) & ~((-1) << CoreBits);
+      Location->Package = (ApicId >> (ThreadBits + CoreBits));
+      return;
+    }
+  }
+ //
+ // xAPIC ID
+ //
+
+ AsmCpuid (EFI_CPUID_VERSION_INFO, NULL, &RegEbx, NULL, NULL);
+ MaxLogicProcessorsPerPackage = (RegEbx >> 16) & 0xff;
+ if (MaxCpuIdIndex >= CPUID_CACHE_PARAMS) {
+ AsmCpuidEx (CPUID_CACHE_PARAMS, 0, &RegEax, NULL, NULL, NULL);
+ MaxCoresPerPackage = (RegEax >> 26) + 1;
+ } else {
+ MaxCoresPerPackage = 1;
+ }
+
+ ThreadBits = (UINTN) (HighBitSet32 (MaxLogicProcessorsPerPackage / MaxCoresPerPackage - 1) + 1);
+ CoreBits = (UINTN) (HighBitSet32 (MaxCoresPerPackage - 1) + 1);
+
+ Location->Thread = ApicId & ~((-1) << ThreadBits);
+ Location->Core = (ApicId >> ThreadBits) & ~((-1) << CoreBits);
+  Location->Package = (ApicId >> (ThreadBits + CoreBits));
+}
+
+/**
+ Gets processor information on the requested processor at the instant this call is made.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of processor.
+ @param[out] ProcessorInfoBuffer A pointer to the buffer where information for
+ the requested processor is deposited.
+
+ @retval EFI_SUCCESS Processor information was returned.
+ @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_NOT_FOUND The processor with the handle specified by
+ ProcessorNumber does not exist in the platform.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetProcessorInfo (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber,
+ OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer
+ )
+{
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber >= mMaxNumberOfCpus || ProcessorInfoBuffer == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Fill in processor information
+ //
+ CopyMem (ProcessorInfoBuffer, &gSmmCpuPrivate->ProcessorInfo[ProcessorNumber], sizeof (EFI_PROCESSOR_INFORMATION));
+ return EFI_SUCCESS;
+}
+
+/**
+  This service switches the requested AP to become the BSP beginning with the next SMI.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of AP that is to become the new BSP.
+
+ @retval EFI_SUCCESS BSP will be switched in next SMI.
+ @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSwitchBsp (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ )
+{
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber >= mMaxNumberOfCpus) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+ return EFI_NOT_FOUND;
+ }
+
+ if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone ||
+ gSmst->CurrentlyExecutingCpu == ProcessorNumber) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Setting of the BSP for next SMI is pending until all SMI handlers are finished
+ //
+ gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuSwitchBsp;
+ return EFI_SUCCESS;
+}
+
+/**
+ Notify that a processor was hot-added.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorId Local APIC ID of the hot-added processor.
+ @param[out] ProcessorNumber The handle number of the hot-added processor.
+
+  @retval EFI_SUCCESS            The hot-addition of the specified processor was successfully notified.
+ @retval EFI_UNSUPPORTED Hot addition of processor is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_ALREADY_STARTED The processor is already online in the system.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmAddProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINT64 ProcessorId,
+ OUT UINTN *ProcessorNumber
+ )
+{
+ UINTN Index;
+
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber == NULL || ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Check if the processor already exists
+ //
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ProcessorId) {
+ return EFI_ALREADY_STARTED;
+ }
+ }
+
+ //
+ // Check CPU hot plug data. The CPU RAS handler should have created the mapping
+ // of the APIC ID to SMBASE.
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (mCpuHotPlugData.ApicId[Index] == ProcessorId &&
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = ProcessorId;
+ gSmmCpuPrivate->ProcessorInfo[Index].StatusFlag = 0;
+ SmmGetProcessorLocation ((UINT32)ProcessorId, &gSmmCpuPrivate->ProcessorInfo[Index].Location);
+
+ *ProcessorNumber = Index;
+ gSmmCpuPrivate->Operation[Index] = SmmCpuAdd;
+ return EFI_SUCCESS;
+ }
+ }
+
+ return EFI_INVALID_PARAMETER;
+}
+
+/**
+ Notify that a processor was hot-removed.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in] ProcessorNumber     The handle number of the processor to be hot-removed.
+
+  @retval EFI_SUCCESS            The hot-removal of the specified processor was successfully notified.
+ @retval EFI_UNSUPPORTED Hot removal of processor is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of BSP is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRemoveProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ )
+{
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber >= mMaxNumberOfCpus ||
+ gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Can't remove BSP
+ //
+ if (ProcessorNumber == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone) {
+ return EFI_UNSUPPORTED;
+ }
+
+ gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId = INVALID_APIC_ID;
+ mCpuHotPlugData.ApicId[ProcessorNumber] = INVALID_APIC_ID;
+
+ //
+ // Removal of the processor from the CPU list is pending until all SMI handlers are finished
+ //
+ gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuRemove;
+ return EFI_SUCCESS;
+}
+
+/**
+  This returns the handle number for the calling processor.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[out] ProcessorNumber The handle number of currently executing processor.
+
+ @retval EFI_SUCCESS The current processor handle number was returned
+ in ProcessorNumber.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWhoAmI (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ OUT UINTN *ProcessorNumber
+ )
+{
+ UINTN Index;
+ UINT64 ApicId;
+
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ ApicId = GetApicId ();
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
+ *ProcessorNumber = Index;
+ return EFI_SUCCESS;
+ }
+ }
+ //
+ // This should not happen
+ //
+ ASSERT (FALSE);
+ return EFI_NOT_FOUND;
+}
+
+/**
+ Update the SMM CPU list per the pending operation.
+
+ This function is called after return from SMI handlers.
+**/
+VOID
+SmmCpuUpdate (
+ VOID
+ )
+{
+ UINTN Index;
+
+ //
+ // Handle pending BSP switch operations
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuSwitchBsp) {
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+ mSmmMpSyncData->SwitchBsp = TRUE;
+ mSmmMpSyncData->CandidateBsp[Index] = TRUE;
+ }
+ }
+
+ //
+ // Handle pending hot-add operations
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuAdd) {
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+ mNumberOfCpus++;
+ }
+ }
+
+ //
+ // Handle pending hot-remove operations
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+ mNumberOfCpus--;
+ }
+ }
+}
+
+/**
+ Register exception handler.
+
+ @param This A pointer to the SMM_CPU_SERVICE_PROTOCOL instance.
+ @param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and
+ the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL
+ of the UEFI 2.0 specification.
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER
+ that is called when a processor interrupt occurs.
+ If this parameter is NULL, then the handler will be uninstalled.
+
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+ IN EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler
+ )
+{
+ if (ExceptionType < 0 || ExceptionType >= EXCEPTION_VECTOR_NUMBER) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if (InterruptHandler == NULL && mExternalVectorTable[ExceptionType] == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (InterruptHandler != NULL && mExternalVectorTable[ExceptionType] != NULL) {
+ return EFI_ALREADY_STARTED;
+ }
+
+ mExternalVectorTable[ExceptionType] = InterruptHandler;
+ return EFI_SUCCESS;
+}
+
+/**
+ Initialize SMM CPU Services.
+ It installs EFI SMM CPU Services Protocol.
+
+ @param[in] Handle The firmware allocated handle for the EFI image.
+
+ @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.
+
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+ IN EFI_HANDLE Handle
+ )
+{
+ EFI_STATUS Status;
+
+ Status = gSmst->SmmInstallProtocolInterface (
+ &Handle,
+ &gEfiSmmCpuServiceProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpuService
+ );
+ ASSERT_EFI_ERROR (Status);
+ return Status;
+}
+
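Once InitializeSmmCpuServices() has run, other SMM drivers can locate gEfiSmmCpuServiceProtocolGuid and call through the protocol. A hypothetical consumer might look like the sketch below; the function name and debug message are illustrative, and the protocol header path mirrors the UefiCpuPkg copy of this protocol, which is an assumption here:

#include <Library/DebugLib.h>
#include <Library/SmmServicesTableLib.h>
#include <Protocol/SmmCpuService.h>   // assumed header location

EFI_STATUS
ShowMyCpuNumberSketch (
  VOID
  )
{
  EFI_SMM_CPU_SERVICE_PROTOCOL  *CpuService;
  UINTN                         CpuNumber;
  EFI_STATUS                    Status;

  //
  // Locate the instance installed by InitializeSmmCpuServices().
  //
  Status = gSmst->SmmLocateProtocol (
                    &gEfiSmmCpuServiceProtocolGuid,
                    NULL,
                    (VOID **) &CpuService
                    );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // WhoAmI maps the calling CPU's APIC ID to its handle number.
  //
  Status = CpuService->WhoAmI (CpuService, &CpuNumber);
  if (!EFI_ERROR (Status)) {
    DEBUG ((EFI_D_INFO, "Running on CPU %d\n", (UINT32) CpuNumber));
  }
  return Status;
}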
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.h
new file mode 100644
index 0000000000..18cc8f8be2
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/CpuService.h
@@ -0,0 +1,198 @@
+/** @file
+  Include file for the SMM CPU Services protocol implementation.
+
+ Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _CPU_SERVICE_H_
+#define _CPU_SERVICE_H_
+
+typedef enum {
+ SmmCpuNone,
+ SmmCpuAdd,
+ SmmCpuRemove,
+ SmmCpuSwitchBsp
+} SMM_CPU_OPERATION;
+
+//
+// SMM CPU Service Protocol function prototypes.
+//
+
+/**
+ Gets processor information on the requested processor at the instant this call is made.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of processor.
+ @param[out] ProcessorInfoBuffer A pointer to the buffer where information for
+ the requested processor is deposited.
+
+ @retval EFI_SUCCESS Processor information was returned.
+ @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_NOT_FOUND The processor with the handle specified by
+ ProcessorNumber does not exist in the platform.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetProcessorInfo (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber,
+ OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer
+ );
+
+/**
+  This service switches the requested AP to become the BSP beginning with the next SMI.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of AP that is to become the new BSP.
+
+ @retval EFI_SUCCESS BSP will be switched in next SMI.
+ @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSwitchBsp (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ );
+
+/**
+ Notify that a processor was hot-added.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorId Local APIC ID of the hot-added processor.
+ @param[out] ProcessorNumber The handle number of the hot-added processor.
+
+  @retval EFI_SUCCESS            The hot-addition of the specified processor was successfully notified.
+ @retval EFI_UNSUPPORTED Hot addition of processor is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_ALREADY_STARTED The processor is already online in the system.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmAddProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINT64 ProcessorId,
+ OUT UINTN *ProcessorNumber
+ );
+
+/**
+ Notify that a processor was hot-removed.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in] ProcessorNumber     The handle number of the processor to be hot-removed.
+
+  @retval EFI_SUCCESS            The hot-removal of the specified processor was successfully notified.
+ @retval EFI_UNSUPPORTED Hot removal of processor is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of BSP is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRemoveProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ );
+
+/**
+  This returns the handle number for the calling processor.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[out] ProcessorNumber The handle number of currently executing processor.
+
+ @retval EFI_SUCCESS The current processor handle number was returned
+ in ProcessorNumber.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWhoAmI (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ OUT UINTN *ProcessorNumber
+ );
+
+/**
+ Register exception handler.
+
+ @param This A pointer to the SMM_CPU_SERVICE_PROTOCOL instance.
+ @param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and
+ the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL
+ of the UEFI 2.0 specification.
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER
+ that is called when a processor interrupt occurs.
+ If this parameter is NULL, then the handler will be uninstalled.
+
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+ IN EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler
+ );
+
+//
+// Internal function prototypes
+//
+
+/**
+ Initializes the pointer to the external exception vector table.
+
+ @param VectorTable Address of the external exception vector table.
+
+**/
+VOID
+EFIAPI
+InitializeSmmExternalVectorTablePtr (
+ EFI_CPU_INTERRUPT_HANDLER *VectorTable
+ );
+
+/**
+ Update the SMM CPU list per the pending operation.
+
+ This function is called after return from SMI handlers.
+**/
+VOID
+SmmCpuUpdate (
+ VOID
+ );
+
+/**
+ Initialize SMM CPU Services.
+ It installs EFI SMM CPU Services Protocol.
+
+  @param[in] Handle      The firmware allocated handle for the EFI image.
+
+ @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.
+
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+ IN EFI_HANDLE Handle
+ );
+
+#endif
+
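The hot-plug entry points declared above are honored only when PcdCpuHotPlugSupport is set, and SmmAddProcessor() additionally requires that the CPU RAS handler has already mapped the new APIC ID into mCpuHotPlugData. A sketch of the notification call, with a purely illustrative APIC ID:

//
// Sketch: notifying this driver of a hot-added CPU. The APIC ID
// (0x10) is illustrative. The returned handle number only becomes
// active after SmmCpuUpdate() runs, once all SMI handlers return.
//
EFI_STATUS
NotifyHotAddSketch (
  IN EFI_SMM_CPU_SERVICE_PROTOCOL  *CpuService
  )
{
  UINTN  ProcessorNumber;

  return CpuService->AddProcessor (CpuService, 0x10, &ProcessorNumber);
}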
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.S
new file mode 100644
index 0000000000..7d29b44385
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.S
@@ -0,0 +1,159 @@
+## @file
+# This is the assembly code for Multi-processor S3 support
+#
+# Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+.equ VacantFlag, 0x0
+.equ NotVacantFlag, 0xff
+
+.equ LockLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+.equ StackStart, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x04
+.equ StackSize, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x08
+.equ RendezvousProc, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x0C
+.equ GdtrProfile, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x10
+.equ IdtrProfile, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x16
+.equ BufferStart, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x1C
+
+#-------------------------------------------------------------------------------------
+#RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+#procedure serializes all the AP processors through an Init sequence. It must be
+#noted that APs arrive here very raw...ie: real mode, no stack.
+#ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
+#IS IN MACHINE CODE.
+#-------------------------------------------------------------------------------------
+#RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+ASM_GLOBAL ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+# At this point CS = 0x(vv00) and ip= 0x0.
+
+ .byte 0x8c,0xc8 # mov ax, cs
+ .byte 0x8e,0xd8 # mov ds, ax
+ .byte 0x8e,0xc0 # mov es, ax
+ .byte 0x8e,0xd0 # mov ss, ax
+ .byte 0x33,0xc0 # xor ax, ax
+ .byte 0x8e,0xe0 # mov fs, ax
+ .byte 0x8e,0xe8 # mov gs, ax
+
+flat32Start:
+
+ .byte 0xBE
+ .word BufferStart
+ .byte 0x66,0x8B,0x14 # mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
+
+ .byte 0xBE
+ .word GdtrProfile
+ .byte 0x66 # db 66h
+ .byte 0x2E,0xF,0x1,0x14 # lgdt fword ptr cs:[si]
+
+ .byte 0xBE
+ .word IdtrProfile
+ .byte 0x66 # db 66h
+ .byte 0x2E,0xF,0x1,0x1C # lidt fword ptr cs:[si]
+
+ .byte 0x33,0xC0 # xor ax, ax
+ .byte 0x8E,0xD8 # mov ds, ax
+
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Get control register 0
+ .byte 0x66,0x83,0xC8,0x1 # or eax, 000000001h ; Set PE bit (bit #0)
+ .byte 0xF,0x22,0xC0 # mov cr0, eax
+
+FLAT32_JUMP:
+
+ .byte 0x66,0x67,0xEA # far jump
+ .long 0x0 # 32-bit offset
+ .word 0x20 # 16-bit selector
+
+PMODE_ENTRY: # protected mode entry point
+
+ movw $0x8,%ax
+ .byte 0x66
+ movw %ax,%ds
+ .byte 0x66
+ movw %ax,%es
+ .byte 0x66
+ movw %ax,%fs
+ .byte 0x66
+ movw %ax,%gs
+ .byte 0x66
+ movw %ax,%ss # Flat mode setup.
+
+ movl %edx,%esi
+
+ movl %esi,%edi
+ addl $LockLocation, %edi
+ movb $NotVacantFlag, %al
+TestLock:
+ xchgb (%edi), %al
+ cmpb $NotVacantFlag, %al
+ jz TestLock
+
+ProgramStack:
+
+ movl %esi,%edi
+ addl $StackSize, %edi
+ movl (%edi),%eax
+ movl %esi,%edi
+ addl $StackStart, %edi
+ addl (%edi),%eax
+ movl %eax,%esp
+ movl %eax,(%edi)
+
+Releaselock:
+
+ movb $VacantFlag, %al
+ movl %esi,%edi
+ addl $LockLocation, %edi
+ xchgb (%edi), %al
+
+ #
+ # Call assembly function to initialize FPU.
+ #
+ lea ASM_PFX(InitializeFloatingPointUnits), %ebx
+ call *%ebx
+ #
+ # Call C Function
+ #
+ movl %esi,%edi
+ addl $RendezvousProc, %edi
+ movl (%edi),%eax
+
+ testl %eax,%eax
+ jz GoToSleep
+ call *%eax # Call C function
+
+GoToSleep:
+ cli
+ hlt
+ jmp GoToSleep
+
+RendezvousFunnelProcEnd:
+#-------------------------------------------------------------------------------------
+# AsmGetAddressMap (&AddressMap);
+#-------------------------------------------------------------------------------------
+ASM_GLOBAL ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+
+ pushal
+ movl %esp,%ebp
+
+ movl 0x24(%ebp), %ebx
+ movl $RendezvousFunnelProcStart, (%ebx)
+ movl $(PMODE_ENTRY - RendezvousFunnelProcStart), 0x4(%ebx)
+ movl $(FLAT32_JUMP - RendezvousFunnelProcStart), 0x8(%ebx)
+ movl $(RendezvousFunnelProcEnd - RendezvousFunnelProcStart), 0x0c(%ebx)
+
+ popal
+ ret
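The .equ offsets at the top of this file (LockLocation through BufferStart) are the assembly-side view of MP_CPU_EXCHANGE_INFO in CpuS3.c, which the BSP places immediately after the copied rendezvous code. A compile-time cross-check sketch, assuming an IA-32 build (4-byte pointers, packed 6-byte IA32_DESCRIPTOR) and edk2's OFFSET_OF/STATIC_ASSERT macros; the struct is file-local to CpuS3.c, so a check like this would live there, and trees without STATIC_ASSERT would use a negative-array-size trick instead:

//
// Sketch: keep the .equ offsets above in lock-step with the C struct.
// Lock sits at offset 0 (LockLocation); the rest follow as below.
//
STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, StackStart)  == 0x04, "StackStart");
STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, StackSize)   == 0x08, "StackSize");
STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, ApFunction)  == 0x0C, "RendezvousProc");
STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, GdtrProfile) == 0x10, "GdtrProfile");
STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, IdtrProfile) == 0x16, "IdtrProfile");
STATIC_ASSERT (OFFSET_OF (MP_CPU_EXCHANGE_INFO, BufferStart) == 0x1C, "BufferStart");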
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm
new file mode 100644
index 0000000000..9cfb54e4fa
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm
@@ -0,0 +1,163 @@
+;; @file
+; This is the assembly code for Multi-processor S3 support
+;
+; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+.686p
+.model flat,C
+.code
+
+EXTERN InitializeFloatingPointUnits:PROC
+
+VacantFlag Equ 00h
+NotVacantFlag Equ 0ffh
+
+LockLocation equ RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+StackStart equ LockLocation + 4h
+StackSize equ LockLocation + 8h
+RendezvousProc equ LockLocation + 0Ch
+GdtrProfile equ LockLocation + 10h
+IdtrProfile equ LockLocation + 16h
+BufferStart equ LockLocation + 1Ch
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+;procedure serializes all the AP processors through an Init sequence. It must be
+;noted that APs arrive here very raw...ie: real mode, no stack.
+;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
+;IS IN MACHINE CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+RendezvousFunnelProc PROC near C PUBLIC
+RendezvousFunnelProcStart::
+
+; At this point CS = 0x(vv00) and ip= 0x0.
+
+ db 8ch, 0c8h ; mov ax, cs
+ db 8eh, 0d8h ; mov ds, ax
+ db 8eh, 0c0h ; mov es, ax
+ db 8eh, 0d0h ; mov ss, ax
+ db 33h, 0c0h ; xor ax, ax
+ db 8eh, 0e0h ; mov fs, ax
+ db 8eh, 0e8h ; mov gs, ax
+
+flat32Start::
+
+ db 0BEh
+ dw BufferStart ; mov si, BufferStart
+ db 66h, 8Bh, 14h ; mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
+
+ db 0BEh
+ dw GdtrProfile ; mov si, GdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 14h ; lgdt fword ptr cs:[si]
+
+ db 0BEh
+ dw IdtrProfile ; mov si, IdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 1Ch ; lidt fword ptr cs:[si]
+
+ db 33h, 0C0h ; xor ax, ax
+ db 8Eh, 0D8h ; mov ds, ax
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0
+ db 66h, 83h, 0C8h, 01h ; or eax, 000000001h ; Set PE bit (bit #0)
+ db 0Fh, 22h, 0C0h ; mov cr0, eax
+
+FLAT32_JUMP::
+
+ db 66h, 67h, 0EAh ; far jump
+ dd 0h ; 32-bit offset
+ dw 20h ; 16-bit selector
+
+PMODE_ENTRY:: ; protected mode entry point
+
+ mov ax, 8h
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax ; Flat mode setup.
+
+ mov esi, edx
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock::
+ xchg byte ptr [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack::
+
+ mov edi, esi
+ add edi, StackSize
+ mov eax, dword ptr [edi]
+ mov edi, esi
+ add edi, StackStart
+ add eax, dword ptr [edi]
+ mov esp, eax
+ mov dword ptr [edi], eax
+
+Releaselock::
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte ptr [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov ebx, InitializeFloatingPointUnits
+ call ebx
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, RendezvousProc
+ mov eax, dword ptr [edi]
+
+ test eax, eax
+ jz GoToSleep
+ call eax ; Call C function
+
+GoToSleep::
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProc ENDP
+RendezvousFunnelProcEnd::
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+AsmGetAddressMap PROC near C PUBLIC
+
+ pushad
+ mov ebp,esp
+
+ mov ebx, dword ptr [ebp+24h]
+ mov dword ptr [ebx], RendezvousFunnelProcStart
+ mov dword ptr [ebx+4h], PMODE_ENTRY - RendezvousFunnelProcStart
+ mov dword ptr [ebx+8h], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov dword ptr [ebx+0ch], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+
+ popad
+ ret
+
+AsmGetAddressMap ENDP
+
+END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/PageTbl.c
new file mode 100644
index 0000000000..264b4533d1
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -0,0 +1,96 @@
+/** @file
+ Page table manipulation functions for IA-32 processors
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+SPIN_LOCK mPFLock;
+
+/**
+ Create PageTable for SMM use.
+
+ @return PageTable Address
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (&mPFLock);
+
+ //
+ // Register Smm Page Fault Handler
+ //
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+
+ return Gen4GPageTable (0);
+}
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ CpuDeadLoop ();
+}
+
+/**
+  The Page Fault handler wrapper for SMM use.
+
+  @param[in] InterruptType Defines the type of interrupt or exception that
+                occurred on the processor. This parameter is processor architecture specific.
+ @param[in] SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (&mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
+ CpuDeadLoop ();
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextIa32->Eip,
+ SystemContext.SystemContextIa32->ExceptionData
+ );
+ } else {
+ SmiDefaultPFHandler ();
+ }
+
+ ReleaseSpinLock (&mPFLock);
+}
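The value SmmInitPageTable() returns is not consumed in this file; it gets patched into the gSmiCr3 slot of the SMI entry template (SmiEntry.S/.asm later in this patch), so every CPU enters its SMI handler with this page table in CR3. In sketch form, with an illustrative routine name:

extern UINT32  gSmiCr3;   // patchable slot in the SMI entry template

VOID
WireSmiPageTableSketch (
  VOID
  )
{
  gSmiCr3 = SmmInitPageTable ();   // loaded into CR3 at SMI entry
}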
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/Semaphore.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/Semaphore.c
new file mode 100644
index 0000000000..30e56083f5
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/Semaphore.c
@@ -0,0 +1,62 @@
+/** @file
+ Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+ after SMBASE relocation.
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINTN mSmmRelocationOriginalAddress;
+BOOLEAN *mRebasedFlag;
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param CpuIndex The processor index.
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex
+ )
+{
+ SOCKET_LGA_775_SMM_CPU_STATE *CpuState;
+
+ mRebasedFlag = (BOOLEAN *) &mRebased[CpuIndex];
+
+ {
+ CpuState = (SOCKET_LGA_775_SMM_CPU_STATE *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_CPU_STATE_OFFSET);
+
+ //
+ // The offset of EIP/RIP is different depending on the SMMRevId
+ //
+ if (CpuState->x86.SMMRevId < SOCKET_LGA_775_SMM_MIN_REV_ID_x64) {
+ mSmmRelocationOriginalAddress = (UINTN) CpuState->x86._EIP;
+ CpuState->x86._EIP = (UINT32) (UINTN) &SmmRelocationSemaphoreComplete;
+ } else {
+ mSmmRelocationOriginalAddress = (UINTN) CpuState->x64._RIP;
+ CpuState->x64._RIP = (UINT64) (UINTN) &SmmRelocationSemaphoreComplete;
+ }
+
+ if (CpuState->x86.AutoHALTRestart & BIT0) {
+ //
+ // Clear the auto HALT restart flag so the RSM instruction returns
+ // program control to the instruction following the HLT instruction,
+ // actually returns to SmmRelocationSemaphoreComplete
+ //
+ CpuState->x86.AutoHALTRestart &= ~BIT0;
+ }
+ }
+}
+
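SemaphoreHook() only redirects the saved EIP/RIP; the redirect target, SmmRelocationSemaphoreComplete, is what finally sets *mRebasedFlag when the AP executes RSM. The BSP side of the handshake is then a simple spin on the per-CPU flag, roughly as sketched below (mRebased[] is the flag array referenced above):

//
// Sketch: BSP-side wait after triggering SMBASE relocation on one AP.
// SmmRelocationSemaphoreComplete sets mRebased[CpuIndex] as the AP
// leaves SMM, releasing this loop.
//
VOID
WaitForApRebaseSketch (
  IN UINTN  CpuIndex
  )
{
  while (!mRebased[CpuIndex]) {
    CpuPause ();
  }
}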
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
new file mode 100644
index 0000000000..f2f81a1991
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
@@ -0,0 +1,153 @@
+## @file
+# Code template of the SMI handler for a particular processor
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+
+ASM_GLOBAL ASM_PFX(gcSmiHandlerTemplate)
+ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
+ASM_GLOBAL ASM_PFX(gSmiCr3)
+ASM_GLOBAL ASM_PFX(gcSmiHandlerOffset)
+ASM_GLOBAL ASM_PFX(gSmiStack)
+ASM_GLOBAL ASM_PFX(gSmbase)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+
+.equ DSC_OFFSET, 0xfb00
+.equ DSC_GDTPTR, 0x30
+.equ DSC_GDTSIZ, 0x38
+.equ DSC_CS, 14
+.equ DSC_DS, 16
+.equ DSC_SS, 18
+.equ DSC_OTHERSEG, 20
+
+.equ MSR_DR6, 0x0c05
+.equ MSR_DR7, 0x0c06
+
+.equ TSS_SEGMENT, 0x40
+
+ .data
+
+ ASM_PFX(gcSmiHandlerOffset): .word _SmiHandler - _SmiEntryPoint + 0x8000
+
+ .text
+
+ASM_PFX(gcSmiHandlerTemplate):
+
+_SmiEntryPoint:
+ .byte 0xbb # mov bx, imm16
+ .word _GdtDesc - _SmiEntryPoint + 0x8000
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTSIZ
+ decl %eax
+ movl %eax, %cs:(%edi) # mov cs:[bx], ax
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTPTR
+ movw %ax, %cs:2(%edi)
+ movw %ax, %bp # ebp = GDT base
+ .byte 0x66
+ lgdt %cs:(%edi)
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmiCr3): .space 4
+ movl %eax, %cr3
+ .byte 0x66
+ movl $0x668,%eax # as cr4.PGE is not set here, refresh cr3
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_CS
+ movl %eax, %cs:-2(%edi)
+ .byte 0x66, 0xbf # mov edi, SMBASE
+ASM_PFX(gSmbase): .space 4
+ .byte 0x67
+ lea ((Start32bit - _SmiEntryPoint) + 0x8000)(%edi), %ax
+ movw %ax, %cs:-6(%edi)
+ movl %cr0, %ebx
+ .byte 0x66
+ andl $0x9ffafff3, %ebx
+ .byte 0x66
+ orl $0x80000023, %ebx
+ movl %ebx, %cr0
+ .byte 0x66,0xea
+ .space 4
+ .space 2
+_GdtDesc: .space 4
+ .space 2
+Start32bit:
+ leal DSC_OFFSET(%edi),%ebx
+ movw DSC_DS(%ebx),%ax
+ movl %eax,%ds
+ movw DSC_OTHERSEG(%ebx),%ax
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+ movw DSC_SS(%ebx),%ax
+ movl %eax,%ss
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ jz L5
+
+# Load TSS
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
+
+ movl $TSS_SEGMENT, %eax
+ ltrw %ax
+L5:
+
+# jmp _SmiHandler # instruction is not needed
+
+_SmiHandler:
+ .byte 0xbc # mov esp, imm32
+ASM_PFX(gSmiStack): .space 4
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+ jz L3
+ jz L6
+
+L6:
+ call L1
+L1:
+ popl %ebp
+ movl $0x80000001, %eax
+ cpuid
+ btl $29, %edx # check cpuid to identify X64 or IA32
+ leal (0x7fc8 - (L1 - _SmiEntryPoint))(%ebp), %edi
+ leal 4(%edi), %esi
+ jnc L2
+ addl $4, %esi
+L2:
+ movl (%esi), %ecx
+ movl (%edi), %edx
+L7:
+ movl %ecx, %dr6
+ movl %edx, %dr7 # restore DR6 & DR7 before running C code
+L3:
+
+ pushl (%esp)
+
+ movl $ASM_PFX(SmiRendezvous), %eax
+ call *%eax
+ popl %ecx
+
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+ jz L4
+
+ movl %dr6, %ecx
+ movl %dr7, %edx
+ movl %ecx, (%esi)
+ movl %edx, (%edi)
+L4:
+
+ rsm
+
+ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
new file mode 100644
index 0000000000..3bd34376f8
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
@@ -0,0 +1,156 @@
+;; @file
+; Code template of the SMI handler for a particular processor
+;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+ .686p
+ .model flat,C
+ .xmm
+
+DSC_OFFSET EQU 0fb00h
+DSC_GDTPTR EQU 30h
+DSC_GDTSIZ EQU 38h
+DSC_CS EQU 14
+DSC_DS EQU 16
+DSC_SS EQU 18
+DSC_OTHERSEG EQU 20
+MSR_DR6 EQU 0c05h
+MSR_DR7 EQU 0c06h
+
+TSS_SEGMENT EQU 40h
+
+SmiRendezvous PROTO C
+
+EXTERNDEF gcSmiHandlerTemplate:BYTE
+EXTERNDEF gcSmiHandlerSize:WORD
+EXTERNDEF gSmiCr3:DWORD
+EXTERNDEF gcSmiHandlerOffset:WORD
+EXTERNDEF gSmiStack:DWORD
+EXTERNDEF gSmbase:DWORD
+EXTERNDEF FeaturePcdGet (PcdCpuSmmDebug):BYTE
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
+
+ .const
+
+gcSmiHandlerOffset DW _SmiHandler - _SmiEntryPoint + 8000h
+
+ .code
+
+gcSmiHandlerTemplate LABEL BYTE
+
+_SmiEntryPoint PROC
+ DB 0bbh ; mov bx, imm16
+ DW offset _GdtDesc - _SmiEntryPoint + 8000h
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTSIZ
+ dec eax
+ mov cs:[edi], eax ; mov cs:[bx], ax
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTPTR
+ mov cs:[edi + 2], ax ; mov cs:[bx + 2], eax
+ mov bp, ax ; ebp = GDT base
+ DB 66h
+ lgdt fword ptr cs:[edi] ; lgdt fword ptr cs:[bx]
+ DB 66h, 0b8h ; mov eax, imm32
+gSmiCr3 DD ?
+ mov cr3, eax
+ DB 66h
+ mov eax, 668h ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_CS
+ mov cs:[edi - 2], eax ; mov cs:[bx - 2], ax
+ DB 66h, 0bfh ; mov edi, SMBASE
+gSmbase DD ?
+ DB 67h
+ lea ax, [edi + (@32bit - _SmiEntryPoint) + 8000h]
+ mov cs:[edi - 6], ax ; mov cs:[bx - 6], eax
+ mov ebx, cr0
+ DB 66h
+ and ebx, 9ffafff3h
+ DB 66h
+ or ebx, 80000023h
+ mov cr0, ebx
+ DB 66h, 0eah
+ DD ?
+ DW ?
+_GdtDesc FWORD ?
+@32bit:
+ lea ebx, [edi + DSC_OFFSET]
+ mov ax, [ebx + DSC_DS]
+ mov ds, eax
+ mov ax, [ebx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [ebx + DSC_SS]
+ mov ss, eax
+
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
+ jz @F
+
+; Load TSS
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
+
+ mov eax, TSS_SEGMENT
+ ltr ax
+@@:
+; jmp _SmiHandler ; instruction is not needed
+_SmiEntryPoint ENDP
+
+_SmiHandler PROC
+ DB 0bch ; mov esp, imm32
+gSmiStack DD ?
+ cmp FeaturePcdGet (PcdCpuSmmDebug), 0
+ jz @3
+
+@@:
+ call @1
+@1:
+ pop ebp
+ mov eax, 80000001h
+ cpuid
+ bt edx, 29 ; check cpuid to identify X64 or IA32
+ lea edi, [ebp - (@1 - _SmiEntryPoint) + 7fc8h]
+ lea esi, [edi + 4]
+ jnc @2
+ add esi, 4
+@2:
+ mov ecx, [esi]
+ mov edx, [edi]
+@5:
+ mov dr6, ecx
+ mov dr7, edx ; restore DR6 & DR7 before running C code
+@3:
+ mov ecx, [esp] ; CPU Index
+
+ push ecx
+ mov eax, SmiRendezvous
+ call eax
+ pop ecx
+
+ cmp FeaturePcdGet (PcdCpuSmmDebug), 0
+ jz @4
+
+ mov ecx, dr6
+ mov edx, dr7
+ mov [esi], ecx
+ mov [edi], edx
+@4:
+ rsm
+_SmiHandler ENDP
+
+gcSmiHandlerSize DW $ - _SmiEntryPoint
+
+ END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.S
new file mode 100644
index 0000000000..4650437d71
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.S
@@ -0,0 +1,1172 @@
+## @file
+# Exception handlers used in SM mode
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+
+ASM_GLOBAL ASM_PFX(gSmiMtrrs)
+ASM_GLOBAL ASM_PFX(gcSmiIdtr)
+ASM_GLOBAL ASM_PFX(gcSmiGdtr)
+ASM_GLOBAL ASM_PFX(gcPsd)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ASM_GLOBAL ASM_PFX(gSavedPageFaultIdtEntry)
+ASM_GLOBAL ASM_PFX(gSavedDebugExceptionIdtEntry)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+ASM_GLOBAL ASM_PFX(InitializeSmmExternalVectorTablePtr)
+
+ .data
+
+NullSeg: .quad 0
+ .quad 0 # reserved for future use
+CodeSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+DataSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x93
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+ .quad 0 # reserved for future use
+CodeSeg16:
+ .word -1
+ .word 0
+ .byte 0
+ .byte 0x9b
+ .byte 0x8f
+ .byte 0
+DataSeg16:
+ .word -1
+ .word 0
+ .byte 0
+ .byte 0x93
+ .byte 0x8f
+ .byte 0
+CodeSeg64:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xaf # LimitHigh
+ .byte 0 # BaseHigh
+.equ GDT_SIZE, . - NullSeg
+
+TssSeg:
+ .word TSS_DESC_SIZE # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x89
+ .byte 0x80 # LimitHigh
+ .byte 0 # BaseHigh
+ExceptionTssSeg:
+ .word TSS_DESC_SIZE # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x89
+ .byte 0x80 # LimitHigh
+ .byte 0 # BaseHigh
+
+.equ CODE_SEL, CodeSeg32 - NullSeg
+.equ DATA_SEL, DataSeg32 - NullSeg
+.equ TSS_SEL, TssSeg - NullSeg
+.equ EXCEPTION_TSS_SEL, ExceptionTssSeg - NullSeg
+
+# IA32 TSS fields
+.equ TSS_ESP0, 4
+.equ TSS_SS0, 8
+.equ TSS_ESP1, 12
+.equ TSS_SS1, 16
+.equ TSS_ESP2, 20
+.equ TSS_SS2, 24
+.equ TSS_CR3, 28
+.equ TSS_EIP, 32
+.equ TSS_EFLAGS, 36
+.equ TSS_EAX, 40
+.equ TSS_ECX, 44
+.equ TSS_EDX, 48
+.equ TSS_EBX, 52
+.equ TSS_ESP, 56
+.equ TSS_EBP, 60
+.equ TSS_ESI, 64
+.equ TSS_EDI, 68
+.equ TSS_ES, 72
+.equ TSS_CS, 76
+.equ TSS_SS, 80
+.equ TSS_DS, 84
+.equ TSS_FS, 88
+.equ TSS_GS, 92
+.equ TSS_LDT, 96
+
+# Create 2 TSS segments just after GDT
+TssDescriptor:
+ .word 0 # PreviousTaskLink
+ .word 0 # Reserved
+ .long 0 # ESP0
+ .word 0 # SS0
+ .word 0 # Reserved
+ .long 0 # ESP1
+ .word 0 # SS1
+ .word 0 # Reserved
+ .long 0 # ESP2
+ .word 0 # SS2
+ .word 0 # Reserved
+ .long 0 # CR3
+ .long 0 # EIP
+ .long 0 # EFLAGS
+ .long 0 # EAX
+ .long 0 # ECX
+ .long 0 # EDX
+ .long 0 # EBX
+ .long 0 # ESP
+ .long 0 # EBP
+ .long 0 # ESI
+ .long 0 # EDI
+ .word 0 # ES
+ .word 0 # Reserved
+ .word 0 # CS
+ .word 0 # Reserved
+ .word 0 # SS
+ .word 0 # Reserved
+ .word 0 # DS
+ .word 0 # Reserved
+ .word 0 # FS
+ .word 0 # Reserved
+ .word 0 # GS
+ .word 0 # Reserved
+ .word 0 # LDT Selector
+ .word 0 # Reserved
+ .word 0 # T
+ .word 0 # I/O Map Base
+.equ TSS_DESC_SIZE, . - TssDescriptor
+
+ExceptionTssDescriptor:
+ .word 0 # PreviousTaskLink
+ .word 0 # Reserved
+ .long 0 # ESP0
+ .word 0 # SS0
+ .word 0 # Reserved
+ .long 0 # ESP1
+ .word 0 # SS1
+ .word 0 # Reserved
+ .long 0 # ESP2
+ .word 0 # SS2
+ .word 0 # Reserved
+ .long 0 # CR3
+ .long PFHandlerEntry # EIP
+ .long 00000002 # EFLAGS
+ .long 0 # EAX
+ .long 0 # ECX
+ .long 0 # EDX
+ .long 0 # EBX
+ .long 0 # ESP
+ .long 0 # EBP
+ .long 0 # ESI
+ .long 0 # EDI
+ .word DATA_SEL # ES
+ .word 0 # Reserved
+ .word CODE_SEL # CS
+ .word 0 # Reserved
+ .word DATA_SEL # SS
+ .word 0 # Reserved
+ .word DATA_SEL # DS
+ .word 0 # Reserved
+ .word DATA_SEL # FS
+ .word 0 # Reserved
+ .word DATA_SEL # GS
+ .word 0 # Reserved
+ .word 0 # LDT Selector
+ .word 0 # Reserved
+ .word 0 # T
+ .word 0 # I/O Map Base
+
+ASM_PFX(gcPsd):
+ .ascii "PSDSIG "
+ .word PSD_SIZE
+ .word 2
+ .word 1 << 2
+ .word CODE_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word 0
+ .long 0
+ .long 0
+ .long 0
+ .long 0
+ .quad 0
+ .long NullSeg
+ .long 0
+ .long GDT_SIZE
+ .long 0
+ .space 24, 0
+ .long ASM_PFX(gSmiMtrrs)
+ .long 0
+.equ PSD_SIZE, . - ASM_PFX(gcPsd)
+
+ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1
+ .long NullSeg
+
+ASM_PFX(gcSmiIdtr): .word IDT_SIZE - 1
+ .long _SmiIDT
+
+_SmiIDT:
+# The following segment repeats 32 times:
+# No. 1
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 2
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 3
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 4
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 5
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 6
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 7
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 8
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 9
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 10
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 11
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 12
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 13
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 14
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 15
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 16
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 17
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 18
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 19
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 20
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 21
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 22
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 23
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 24
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 25
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 26
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 27
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 28
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 29
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 30
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 31
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 32
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+
+.equ IDT_SIZE, . - _SmiIDT
+
+TaskGateDescriptor:
+ .word 0 # Reserved
+ .word EXCEPTION_TSS_SEL # TSS Segment selector
+ .byte 0 # Reserved
+ .byte 0x85 # Task Gate, present, DPL = 0
+ .word 0 # Reserved
+
+# Pointer to the external interrupt vector table
+
+ExternalVectorTablePtr: .long 0
+
+#
+# Saved IDT Entry for Page Fault
+#
+ASM_PFX(gSavedPageFaultIdtEntry):
+ .long 0
+ .long 0
+
+#
+# Saved IDT Entry for INT 1
+#
+ASM_PFX(gSavedDebugExceptionIdtEntry):
+ .long 0
+ .long 0
+
+ .text
+
+ASM_PFX(InitializeSmmExternalVectorTablePtr):
+ movl 4(%esp), %eax
+ movl %eax, ExternalVectorTablePtr
+ ret
+
+#------------------------------------------------------------------------------
+# Exception handlers
+#------------------------------------------------------------------------------
+_SmiExceptionHandlers:
+.equ IHDLRIDX, 0
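+# Layout note: each stub below should assemble to exactly 8 bytes (either
+# push %eax + push imm8 + jmp rel32, or push imm8 + jmp rel32 + int3 padding),
+# so InitializeIDT can find stub N at _SmiExceptionHandlers + N * 8. Vectors
+# 8, 10-14 and 17 get a CPU-pushed error code, so their stubs omit the dummy
+# "pushl %eax".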
+# The following segment repeats 8 times:
+# No. 1
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 2
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 3
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 4
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 5
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 6
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 7
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 8
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+
+# The following segment repeats 5 times:
+# No. 1
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 2
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 3
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 4
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 5
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+
+# The following segment repeats 2 times:
+# No. 1
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 2
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+ int $3
+.equ IHDLRIDX, IHDLRIDX + 1
+
+# The following segment repeats 14 times:
+# No. 1
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 2
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 3
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 4
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 5
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 6
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 7
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 8
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 9
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 10
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 11
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 12
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 13
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+# No. 14
+ pushl %eax # dummy error code
+ pushl $IHDLRIDX
+ .byte 0xe9 # jmp disp32
+ .long _SmiExceptionEntryPoint - (. + 4)
+.equ IHDLRIDX, IHDLRIDX + 1
+
+
+#------------------------------------------------------------------------------
+# _SmiExceptionEntryPoint is the entry point for all exceptions
+#
+# Stack:
+#+---------------------+
+#+ EFlags +
+#+---------------------+
+#+ CS +
+#+---------------------+
+#+ EIP +
+#+---------------------+
+#+ Error Code +
+#+---------------------+
+#+ Vector Number +
+#+---------------------+
+#+ EBP +
+#+---------------------+ <-- EBP
+#
+# The stubs push a dummy error code for vectors that lack a CPU-pushed one,
+# so the Error Code slot is always present.
+#------------------------------------------------------------------------------
+_SmiExceptionEntryPoint:
+ pushl %ebp
+ movl %esp, %ebp
+
+
+ #
+ # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ # is 16-byte aligned
+ #
+ andl $0xfffffff0, %esp
+ subl $12, %esp
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushl %ebx
+ leal (6*4)(%ebp), %ecx
+ pushl %ecx # ESP
+ pushl (%ebp) # EBP
+ pushl %esi
+ pushl %edi
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movl %ss, %eax
+ pushl %eax
+ movzwl (4*4)(%ebp), %eax
+ pushl %eax
+ movl %ds, %eax
+ pushl %eax
+ movl %es, %eax
+ pushl %eax
+ movl %fs, %eax
+ pushl %eax
+ movl %gs, %eax
+ pushl %eax
+
+## UINT32 Eip;
+ movl (3*4)(%ebp), %eax
+ pushl %eax
+
+## UINT32 Gdtr[2], Idtr[2];
+ subl $8, %esp
+ sidt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xffff, %eax
+ movl %eax, 4(%esp)
+
+ subl $8, %esp
+ sgdt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xffff, %eax
+ movl %eax, 4(%esp)
+
+## UINT32 Ldtr, Tr;
+ xorl %eax, %eax
+ strw %ax
+ pushl %eax
+ sldtw %ax
+ pushl %eax
+
+## UINT32 EFlags;
+ movl (5*4)(%ebp), %eax
+ pushl %eax
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ movl %cr4, %eax
+ orl $0x208, %eax
+ movl %eax, %cr4
+ pushl %eax
+ movl %cr3, %eax
+ pushl %eax
+ movl %cr2, %eax
+ pushl %eax
+ xorl %eax, %eax
+ pushl %eax
+ movl %cr0, %eax
+ pushl %eax
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ movl %dr7, %eax
+ pushl %eax
+ movl %dr6, %eax
+ pushl %eax
+ movl %dr3, %eax
+ pushl %eax
+ movl %dr2, %eax
+ pushl %eax
+ movl %dr1, %eax
+ pushl %eax
+ movl %dr0, %eax
+ pushl %eax
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ subl $512, %esp
+ movl %esp, %edi
+ .byte 0x0f, 0xae, 0x07 #fxsave [edi]
+
+# UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+## UINT32 ExceptionData;
+ pushl (2*4)(%ebp)
+
+## call into exception handler
+ movl ExternalVectorTablePtr, %eax # get the interrupt vectors base
+ orl %eax, %eax # NULL?
+ jz nullExternalExceptionHandler
+
+ movl 4(%ebp), %ecx
+ movl (%eax, %ecx, 4), %eax
+ orl %eax, %eax # NULL?
+ jz nullExternalExceptionHandler
+
+## Prepare parameter and call
+ movl %esp, %edx
+ pushl %edx
+ movl (1*4)(%ebp), %edx
+ pushl %edx
+
+ #
+ # Call External Exception Handler
+ #
+ call *%eax
+ addl $8, %esp
+ jmp L4
+
+nullExternalExceptionHandler:
+# CpuDeadLoop() is the default exception handler since it preserves the processor
+# branch log.
+ call ASM_PFX(CpuDeadLoop)
+
+L4:
+## UINT32 ExceptionData;
+ addl $4, %esp
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ movl %esp, %esi
+ .byte 0xf, 0xae, 0xe # fxrstor [esi]
+ addl $512, %esp
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+## Skip restoration of DRx registers to support in-circuit emulators
+## or debuggers that set breakpoints in interrupt/exception context
+ addl $4*6, %esp
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ popl %eax
+ movl %eax, %cr0
+ addl $4, %esp # not for Cr1
+ popl %eax
+ movl %eax, %cr2
+ popl %eax
+ movl %eax, %cr3
+ popl %eax
+ movl %eax, %cr4
+
+## UINT32 EFlags;
+ popl (5*4)(%ebp)
+
+## UINT32 Ldtr, Tr;
+## UINT32 Gdtr[2], Idtr[2];
+## Best not let anyone mess with these particular registers...
+ addl $24, %esp
+
+## UINT32 Eip;
+ popl (3*4)(%ebp)
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+## NOTE - modified segment registers could hang the debugger... We
+## could attempt to insulate ourselves against this possibility,
+## but that poses risks as well.
+##
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+ popl (4*4)(%ebp)
+ popl %ss
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ popl %edi
+ popl %esi
+ addl $4, %esp # not for ebp
+ addl $4, %esp # not for esp
+ popl %ebx
+ popl %edx
+ popl %ecx
+ popl %eax
+
+ movl %ebp, %esp
+ popl %ebp
+
+# Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+ jz Done
+# Check if this is page fault exception
+ cmpl $0xe, (%esp)
+ jnz L5
+# Enable TF bit after page fault handler runs
+ btsl $8, 16(%esp) # EFLAGS
+ jmp Done
+L5:
+# Check if this is INT 1 exception
+ cmpl $1, (%esp)
+ jnz Done
+# Clear TF bit after INT1 handler runs
+ btcl $8, 16(%esp) # EFLAGS
+Done:
+
+ addl $8, %esp # skip INT# & ErrCode
+Return:
+ iret
+#
+# Page Fault Exception Handler entry when SMM Stack Guard is enabled
+# Execution starts here after a task switch
+#
+PFHandlerEntry:
+#
+# Get this processor's TSS
+#
+ subl $8, %esp
+ sgdt 2(%esp)
+ movl 4(%esp), %eax # GDT base
+ addl $8, %esp
+ movl (TSS_SEL+2)(%eax), %ecx
+ shll $8, %ecx
+ movb (TSS_SEL+7)(%eax), %cl
+ rorl $8, %ecx # ecx = TSS base
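+# (An IA32 descriptor stores its base address split across bytes 2..4 and
+# byte 7; the shl/mov/ror sequence above splices those fields into a linear
+# 32-bit TSS base.)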
+
+ movl %esp, %ebp
+
+ #
+ # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ # is 16-byte aligned
+ #
+ andl $0xfffffff0, %esp
+ subl $12, %esp
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pushl TSS_EAX(%ecx)
+ pushl TSS_ECX(%ecx)
+ pushl TSS_EDX(%ecx)
+ pushl TSS_EBX(%ecx)
+ pushl TSS_ESP(%ecx)
+ pushl TSS_EBP(%ecx)
+ pushl TSS_ESI(%ecx)
+ pushl TSS_EDI(%ecx)
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movzwl TSS_SS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_CS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_DS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_ES(%ecx), %eax
+ pushl %eax
+ movzwl TSS_FS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_GS(%ecx), %eax
+ pushl %eax
+
+## UINT32 Eip;
+ pushl TSS_EIP(%ecx)
+
+## UINT32 Gdtr[2], Idtr[2];
+ subl $8, %esp
+ sidt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xFFFF, %eax
+ movl %eax, 4(%esp)
+
+ subl $8, %esp
+ sgdt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xFFFF, %eax
+ movl %eax, 4(%esp)
+
+## UINT32 Ldtr, Tr;
+    movl    $TSS_SEL, %eax
+ pushl %eax
+ movzwl TSS_LDT(%ecx), %eax
+ pushl %eax
+
+## UINT32 EFlags;
+ pushl TSS_EFLAGS(%ecx)
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ movl %cr4, %eax
+ orl $0x208, %eax
+ movl %eax, %cr4
+ pushl %eax
+ movl %cr3, %eax
+ pushl %eax
+ movl %cr2, %eax
+ pushl %eax
+ xorl %eax, %eax
+ pushl %eax
+ movl %cr0, %eax
+ pushl %eax
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ movl %dr7, %eax
+ pushl %eax
+ movl %dr6, %eax
+ pushl %eax
+ movl %dr3, %eax
+ pushl %eax
+ movl %dr2, %eax
+ pushl %eax
+ movl %dr1, %eax
+ pushl %eax
+ movl %dr0, %eax
+ pushl %eax
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ subl $512, %esp
+ movl %esp, %edi
+ .byte 0x0f, 0xae, 0x07 #fxsave [edi]
+
+# UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+## UINT32 ExceptionData;
+ pushl (%ebp)
+
+## call into exception handler
+ movl ExternalVectorTablePtr, %eax # get the interrupt vectors base
+ orl %eax, %eax # NULL?
+ jz nullExternalExceptionHandler
+
+ movl %ecx, %ebx
+    movl    $14, %ecx
+ movl (%eax, %ecx, 4), %eax
+ orl %eax, %eax # NULL?
+ jz nullExternalExceptionHandler
+
+## Prepare parameter and call
+ movl %esp, %edx
+ pushl %edx
+ movl $14, %edx
+ pushl %edx
+
+ #
+ # Call External Exception Handler
+ #
+ call *%eax
+ addl $8, %esp
+
+ movl %ebx, %ecx
+## UINT32 ExceptionData;
+ addl $4, %esp
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ movl %esp, %esi
+ .byte 0xf, 0xae, 0xe # fxrstor [esi]
+ addl $512, %esp
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+## Skip restoration of DRx registers to support in-circuit emulators
+## or debuggers that set breakpoints in interrupt/exception context
+ addl $4*6, %esp
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ popl %eax
+ movl %eax, %cr0
+ addl $4, %esp # not for Cr1
+ popl %eax
+ movl %eax, %cr2
+ popl %eax
+ movl %eax, TSS_CR3(%ecx)
+ popl %eax
+ movl %eax, %cr4
+
+## UINT32 EFlags;
+ popl TSS_EFLAGS(%ecx)
+
+## UINT32 Ldtr, Tr;
+## UINT32 Gdtr[2], Idtr[2];
+## Best not let anyone mess with these particular registers...
+ addl $24, %esp
+
+## UINT32 Eip;
+ popl TSS_EIP(%ecx)
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+## NOTE - modified segment registers could hang the debugger... We
+## could attempt to insulate ourselves against this possibility,
+## but that poses risks as well.
+##
+ popl %eax
+ movw %ax, TSS_GS(%ecx)
+ popl %eax
+ movw %ax, TSS_FS(%ecx)
+ popl %eax
+ movw %ax, TSS_ES(%ecx)
+ popl %eax
+ movw %ax, TSS_DS(%ecx)
+ popl %eax
+ movw %ax, TSS_CS(%ecx)
+ popl %eax
+ movw %ax, TSS_SS(%ecx)
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ popl TSS_EDI(%ecx)
+ popl TSS_ESI(%ecx)
+ addl $4, %esp # not for ebp
+ addl $4, %esp # not for esp
+ popl TSS_EBX(%ecx)
+ popl TSS_EDX(%ecx)
+ popl TSS_ECX(%ecx)
+ popl TSS_EAX(%ecx)
+
+ movl %ebp, %esp
+
+# Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+ jz Done2
+# Enable TF bit after page fault handler runs
+ btsl $8, TSS_EFLAGS(%ecx) # EFLAGS
+
+Done2:
+
+    addl    $4, %esp                    # skip ErrCode
+
+ jmp Return
+
+ASM_GLOBAL ASM_PFX(InitializeIDT)
+ASM_PFX(InitializeIDT):
+ pushl %ebx
+ lea _SmiIDT - 8, %edx
+# push IDT_SIZE / 8
+ .byte 0x68 # push /id
+ .long IDT_SIZE / 8
+ lea _SmiExceptionHandlers - 8, %ebx
+ popl %ecx
+L1:
+ leal (%ebx,%ecx,8),%eax
+ movw %ax,(%edx,%ecx,8)
+ shrl $16,%eax
+ movw %ax, 6(%edx, %ecx, 8)
+ loop L1
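+# Each gate in _SmiIDT already carries CODE_SEL and the interrupt-gate type
+# (0x8e); the loop above only patches in the handler offset, whose 16-bit
+# halves live in bytes 0..1 and 6..7 of each 8-byte gate.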
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ jz L2
+
+#
+# If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+# is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+# the processor can use a known-good stack in case the stack has run out.
+#
+ leal _SmiIDT + 14 * 8, %ebx
+ leal TaskGateDescriptor, %edx
+ movl (%edx), %eax
+ movl %eax, (%ebx)
+ movl 4(%edx), %eax
+ movl %eax, 4(%ebx)
+
+L2:
+#
+# Save Page Fault IDT entry in gSavedPageFaultIdtEntry
+#
+ leal _SmiIDT + 14 * 8, %ebx
+ leal ASM_PFX(gSavedPageFaultIdtEntry), %edx
+ movl (%ebx), %eax
+ movl %eax, (%edx)
+ movl 4(%ebx), %eax
+ movl %eax, 4(%edx)
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+ jz L3
+
+#
+# Save INT 1 IDT entry in gSavedDebugExceptionIdtEntry
+#
+ leal _SmiIDT + 1 * 8, %ebx
+ leal ASM_PFX(gSavedDebugExceptionIdtEntry), %edx
+ movl (%ebx), %eax
+ movl %eax, (%edx)
+ movl 4(%ebx), %eax
+ movl %eax, 4(%edx)
+
+L3:
+ popl %ebx
+ ret
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.asm
new file mode 100644
index 0000000000..33b4d8387f
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmiException.asm
@@ -0,0 +1,858 @@
+;; @file
+; Exception handlers used in SM mode
+;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+ .686p
+ .model flat,C
+
+CpuDeadLoop PROTO C
+
+EXTERNDEF gSmiMtrrs:QWORD
+EXTERNDEF gcSmiIdtr:FWORD
+EXTERNDEF gcSmiGdtr:FWORD
+EXTERNDEF gcPsd:BYTE
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
+EXTERNDEF gSavedPageFaultIdtEntry:DWORD
+EXTERNDEF gSavedDebugExceptionIdtEntry:DWORD
+EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE
+
+
+ .data
+
+NullSeg DQ 0 ; reserved by architecture
+ DQ 0 ; reserved for future use
+CodeSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 93h
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+ DQ 0 ; reserved for future use
+CodeSeg16 LABEL QWORD
+ DW -1
+ DW 0
+ DB 0
+ DB 9bh
+ DB 8fh
+ DB 0
+DataSeg16 LABEL QWORD
+ DW -1
+ DW 0
+ DB 0
+ DB 93h
+ DB 8fh
+ DB 0
+CodeSeg64 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0afh ; LimitHigh
+ DB 0 ; BaseHigh
+GDT_SIZE = $ - offset NullSeg
+
+TssSeg LABEL QWORD
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 89h
+ DB 080h ; LimitHigh
+ DB 0 ; BaseHigh
+ExceptionTssSeg LABEL QWORD
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 89h
+ DB 080h ; LimitHigh
+ DB 0 ; BaseHigh
+
+CODE_SEL = offset CodeSeg32 - offset NullSeg
+DATA_SEL = offset DataSeg32 - offset NullSeg
+TSS_SEL = offset TssSeg - offset NullSeg
+EXCEPTION_TSS_SEL = offset ExceptionTssSeg - offset NullSeg
+
+IA32_TSS STRUC
+ DW ?
+ DW ?
+ ESP0 DD ?
+ SS0 DW ?
+ DW ?
+ ESP1 DD ?
+ SS1 DW ?
+ DW ?
+ ESP2 DD ?
+ SS2 DW ?
+ DW ?
+ _CR3 DD ?
+ EIP DD ?
+ EFLAGS DD ?
+ _EAX DD ?
+ _ECX DD ?
+ _EDX DD ?
+ _EBX DD ?
+ _ESP DD ?
+ _EBP DD ?
+ _ESI DD ?
+ _EDI DD ?
+ _ES DW ?
+ DW ?
+ _CS DW ?
+ DW ?
+ _SS DW ?
+ DW ?
+ _DS DW ?
+ DW ?
+ _FS DW ?
+ DW ?
+ _GS DW ?
+ DW ?
+ LDT DW ?
+ DW ?
+ DW ?
+ DW ?
+IA32_TSS ENDS
+
+; Create 2 TSS segments just after GDT
+TssDescriptor LABEL BYTE
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD 0 ; EIP
+ DD 0 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW 0 ; ES
+ DW 0 ; Reserved
+ DW 0 ; CS
+ DW 0 ; Reserved
+ DW 0 ; SS
+ DW 0 ; Reserved
+ DW 0 ; DS
+ DW 0 ; Reserved
+ DW 0 ; FS
+ DW 0 ; Reserved
+ DW 0 ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+TSS_DESC_SIZE = $ - offset TssDescriptor
+
+ExceptionTssDescriptor LABEL BYTE
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD offset PFHandlerEntry ; EIP
+ DD 00000002 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW DATA_SEL ; ES
+ DW 0 ; Reserved
+ DW CODE_SEL ; CS
+ DW 0 ; Reserved
+ DW DATA_SEL ; SS
+ DW 0 ; Reserved
+ DW DATA_SEL ; DS
+ DW 0 ; Reserved
+ DW DATA_SEL ; FS
+ DW 0 ; Reserved
+ DW DATA_SEL ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+
+gcPsd LABEL BYTE
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 SHL 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0
+ DQ offset NullSeg
+ DD GDT_SIZE
+ DD 0
+ DB 24 dup (0)
+ DQ offset gSmiMtrrs
+PSD_SIZE = $ - offset gcPsd
+
+gcSmiGdtr LABEL FWORD
+ DW GDT_SIZE - 1
+ DD offset NullSeg
+
+gcSmiIdtr LABEL FWORD
+ DW IDT_SIZE - 1
+ DD offset _SmiIDT
+
+_SmiIDT LABEL QWORD
+REPEAT 32
+ DW 0 ; Offset 0:15
+ DW CODE_SEL ; Segment selector
+ DB 0 ; Unused
+ DB 8eh ; Interrupt Gate, Present
+ DW 0 ; Offset 16:31
+ ENDM
+IDT_SIZE = $ - offset _SmiIDT
+
+TaskGateDescriptor LABEL DWORD
+ DW 0 ; Reserved
+ DW EXCEPTION_TSS_SEL ; TSS Segment selector
+ DB 0 ; Reserved
+ DB 85h ; Task Gate, present, DPL = 0
+ DW 0 ; Reserved
+
+;
+; Pointer to the external interrupt vector table
+;
+ExternalVectorTablePtr DWORD 0
+
+;
+; Saved IDT Entry for Page Fault
+;
+gSavedPageFaultIdtEntry LABEL DWORD
+ DD 0
+ DD 0
+
+;
+; Saved IDT Entry for INT 1
+;
+gSavedDebugExceptionIdtEntry LABEL DWORD
+ DD 0
+ DD 0
+
+
+ .code
+
+InitializeSmmExternalVectorTablePtr PROC PUBLIC
+ mov eax, [esp+4]
+ mov ExternalVectorTablePtr, eax
+ ret
+InitializeSmmExternalVectorTablePtr ENDP
+
+;------------------------------------------------------------------------------
+; Exception handlers
+;------------------------------------------------------------------------------
+_SmiExceptionHandlers PROC
+IHDLRIDX = 0
+REPEAT 8 ; INT0 ~ INT7
+ push eax ; dummy error code
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+REPEAT 1 ; INT8
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+ int 3
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+REPEAT 1 ; INT9
+ push eax ; dummy error code
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+REPEAT 5 ; INT10 ~ INT14
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+ int 3
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+REPEAT 2 ; INT15 ~ INT16
+ push eax ; dummy error code
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+REPEAT 1 ; INT17
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+ int 3
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+REPEAT 14 ; INT18 ~ INT31
+ push eax ; dummy error code
+ push IHDLRIDX
+ DB 0e9h ; jmp disp32
+ DD _SmiExceptionEntryPoint - ($ + 4)
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+_SmiExceptionHandlers ENDP
+
+;------------------------------------------------------------------------------
+; _SmiExceptionEntryPoint is the entry point for all exceptions
+;
+;
+; Stack:
+; +---------------------+
+; + EFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + EIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + EBP +
+; +---------------------+ <-- EBP
+;
+;
+;------------------------------------------------------------------------------
+_SmiExceptionEntryPoint PROC
+
+
+ push ebp
+ mov ebp, esp
+
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0fffffff0h
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push eax
+ push ecx
+ push edx
+ push ebx
+ lea ecx, [ebp + 6 * 4]
+ push ecx ; ESP
+ push dword ptr [ebp] ; EBP
+ push esi
+ push edi
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ mov eax, ss
+ push eax
+ movzx eax, word ptr [ebp + 4 * 4]
+ push eax
+ mov eax, ds
+ push eax
+ mov eax, es
+ push eax
+ mov eax, fs
+ push eax
+ mov eax, gs
+ push eax
+
+;; UINT32 Eip;
+ mov eax, [ebp + 3 * 4]
+ push eax
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ xor eax, eax
+ str ax
+ push eax
+ sldt ax
+ push eax
+
+;; UINT32 EFlags;
+ mov eax, [ebp + 5 * 4]
+ push eax
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 208h
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ sub esp, 512
+ mov edi, esp
+ db 0fh, 0aeh, 07h ;fxsave [edi]
+
+; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword ptr [ebp + 2 * 4]
+
+;; call into exception handler
+ mov eax, ExternalVectorTablePtr ; get the interrupt vectors base
+ or eax, eax ; NULL?
+ jz nullExternalExceptionHandler
+
+ mov ecx, [ebp + 4]
+ mov eax, [eax + ecx * 4]
+ or eax, eax ; NULL?
+ jz nullExternalExceptionHandler
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, dword ptr [ebp + 1 * 4]
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ call eax
+ add esp, 8
+ jmp @F
+
+nullExternalExceptionHandler:
+; CpuDeadLoop() is the default exception handler since it preserves the processor
+; branch log.
+ call CpuDeadLoop
+
+@@:
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ db 0fh, 0aeh, 0eh ; fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support in-circuit emulators
+;; or debuggers that set breakpoints in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov cr3, eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop dword ptr [ebp + 5 * 4]
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop dword ptr [ebp + 3 * 4]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop gs
+ pop fs
+ pop es
+ pop ds
+ pop dword ptr [ebp + 4 * 4]
+ pop ss
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop edi
+ pop esi
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+
+ mov esp, ebp
+ pop ebp
+
+; Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmp FeaturePcdGet (PcdCpuSmmProfileEnable), 0
+ jz @Done
+; Check if this is page fault exception
+ cmp dword ptr [esp], 0eh
+ jnz @F
+; Enable TF bit after page fault handler runs
+ bts dword ptr [esp + 16], 8 ; EFLAGS
+ jmp @Done
+@@:
+; Check if this is INT 1 exception
+ cmp dword ptr [esp], 1
+ jnz @Done
+; Clear TF bit after INT1 handler runs
+ btc dword ptr [esp + 16], 8 ; EFLAGS
+@Done:
+
+ add esp, 8 ; skip INT# & ErrCode
+Return:
+ iretd
+;
+; Page Fault Exception Handler entry when SMM Stack Guard is enabled
+; Execution starts here after a task switch
+;
+PFHandlerEntry::
+;
+; Get this processor's TSS
+;
+ sub esp, 8
+ sgdt [esp + 2]
+ mov eax, [esp + 4] ; GDT base
+ add esp, 8
+ mov ecx, [eax + TSS_SEL + 2]
+ shl ecx, 8
+ mov cl, [eax + TSS_SEL + 7]
+ ror ecx, 8 ; ecx = TSS base
+
+ mov ebp, esp
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0fffffff0h
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push (IA32_TSS ptr [ecx])._EAX
+ push (IA32_TSS ptr [ecx])._ECX
+ push (IA32_TSS ptr [ecx])._EDX
+ push (IA32_TSS ptr [ecx])._EBX
+ push (IA32_TSS ptr [ecx])._ESP
+ push (IA32_TSS ptr [ecx])._EBP
+ push (IA32_TSS ptr [ecx])._ESI
+ push (IA32_TSS ptr [ecx])._EDI
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movzx eax, (IA32_TSS ptr [ecx])._SS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._CS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._DS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._ES
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._FS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._GS
+ push eax
+
+;; UINT32 Eip;
+ push (IA32_TSS ptr [ecx]).EIP
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ mov eax, TSS_SEL
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx]).LDT
+ push eax
+
+;; UINT32 EFlags;
+ push (IA32_TSS ptr [ecx]).EFLAGS
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 208h
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ sub esp, 512
+ mov edi, esp
+ db 0fh, 0aeh, 07h ;fxsave [edi]
+
+; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword ptr [ebp]
+
+;; call into exception handler
+ mov eax, ExternalVectorTablePtr ; get the interrupt vectors base
+ or eax, eax ; NULL?
+ jz nullExternalExceptionHandler
+
+ mov ebx, ecx
+ mov ecx, 14
+ mov eax, [eax + ecx * 4]
+ or eax, eax ; NULL?
+ jz nullExternalExceptionHandler
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, 14
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ call eax
+ add esp, 8
+
+ mov ecx, ebx
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ db 0fh, 0aeh, 0eh ; fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support in-circuit emulators
+;; or debuggers that set breakpoints in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._CR3, eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop (IA32_TSS ptr [ecx]).EFLAGS
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop (IA32_TSS ptr [ecx]).EIP
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop eax
+ mov (IA32_TSS ptr [ecx])._GS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._FS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._ES, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._DS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._CS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._SS, ax
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop (IA32_TSS ptr [ecx])._EDI
+ pop (IA32_TSS ptr [ecx])._ESI
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop (IA32_TSS ptr [ecx])._EBX
+ pop (IA32_TSS ptr [ecx])._EDX
+ pop (IA32_TSS ptr [ecx])._ECX
+ pop (IA32_TSS ptr [ecx])._EAX
+
+ mov esp, ebp
+
+; Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmp FeaturePcdGet (PcdCpuSmmProfileEnable), 0
+ jz @Done2
+; Enable TF bit after page fault handler runs
+ bts (IA32_TSS ptr [ecx]).EFLAGS, 8 ; EFLAGS
+
+@Done2:
+
+ add esp, 4 ; skip ErrCode
+
+ jmp Return
+_SmiExceptionEntryPoint ENDP
+
+InitializeIDT PROC USES ebx
+ lea edx, _SmiIDT - 8
+; push IDT_SIZE / 8
+ DB 68h ; push /id
+ DD IDT_SIZE / 8
+ lea ebx, _SmiExceptionHandlers - 8
+ pop ecx
+@@:
+ lea eax, [ebx + ecx*8]
+ mov [edx + ecx*8], ax
+ shr eax, 16
+ mov [edx + ecx*8 + 6], ax
+ loop @B
+
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
+ jz @F
+
+;
+; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+; is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+; the processor can use a known-good stack in case the stack has run out.
+;
+ lea ebx, _SmiIDT + 14 * 8
+ lea edx, TaskGateDescriptor
+ mov eax, [edx]
+ mov [ebx], eax
+ mov eax, [edx + 4]
+ mov [ebx + 4], eax
+
+@@:
+;
+; Save Page Fault IDT entry in gSavedPageFaultIdtEntry
+;
+ lea ebx, _SmiIDT + 14 * 8
+ lea edx, gSavedPageFaultIdtEntry
+ mov eax, [ebx]
+ mov [edx], eax
+ mov eax, [ebx + 4]
+ mov [edx + 4], eax
+
+ cmp FeaturePcdGet (PcdCpuSmmProfileEnable), 0
+ jz @F
+
+;
+; Save INT 1 IDT entry in gSavedDebugExceptionIdtEntry
+;
+ lea ebx, _SmiIDT + 1 * 8
+ lea edx, gSavedDebugExceptionIdtEntry
+ mov eax, [ebx]
+ mov [edx], eax
+ mov eax, [ebx + 4]
+ mov [edx + 4], eax
+
+@@:
+ ret
+InitializeIDT ENDP
+
+ END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.S
new file mode 100644
index 0000000000..0a19a3949e
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.S
@@ -0,0 +1,94 @@
+## @file
+# Functions for relocating SMBASE's for all processors
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+
+ASM_GLOBAL ASM_PFX(gSmmCr0)
+ASM_GLOBAL ASM_PFX(gSmmCr3)
+ASM_GLOBAL ASM_PFX(gSmmCr4)
+ASM_GLOBAL ASM_PFX(gcSmmInitTemplate)
+ASM_GLOBAL ASM_PFX(gcSmmInitSize)
+ASM_GLOBAL ASM_PFX(gSmmJmpAddr)
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_GLOBAL ASM_PFX(gSmmInitStack)
+
+ .data
+
+NullSeg: .quad 0
+DataSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x93
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+CodeSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+.equ GDT_SIZE, . - NullSeg
+
+ .text
+
+GdtDesc:
+ .word GDT_SIZE
+ .long NullSeg
+
+SmmStartup:
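+# This entry executes in 16-bit real mode (the CPU state at the SMI entry
+# point), so 32-bit operations are spelled with explicit 0x66/0x67 prefixes
+# and hand-encoded opcodes; the .space slots after each 0xb8 (mov eax, imm32)
+# opcode are presumably patched at runtime by the driver with the CR3/CR4/CR0
+# values to load.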
+ .byte 0x66,0xb8
+ASM_PFX(gSmmCr3): .space 4
+ movl %eax, %cr3
+ .byte 0x67,0x66
+ lgdt %cs:(GdtDesc - SmmStartup)(%ebp)
+ .byte 0x66,0xb8
+ASM_PFX(gSmmCr4): .space 4
+ movl %eax, %cr4
+ .byte 0x66,0xb8
+ASM_PFX(gSmmCr0): .space 4
+ .byte 0xbf,8,0 # mov di, 8
+ movl %eax, %cr0
+ .byte 0x66,0xea # jmp far [ptr48]
+ASM_PFX(gSmmJmpAddr): .long Start32bit
+ .word 0x10
+Start32bit:
+ movl %edi,%ds
+ movl %edi,%es
+ movl %edi,%fs
+ movl %edi,%gs
+ movl %edi,%ss
+ .byte 0xbc # mov esp, imm32
+ASM_PFX(gSmmInitStack): .space 4
+ call ASM_PFX(SmmInitHandler)
+ rsm
+
+ASM_PFX(gcSmmInitTemplate):
+
+_SmmInitTemplate:
+ .byte 0x66
+ movl $SmmStartup, %ebp
+ .byte 0x66, 0x81, 0xed, 0, 0, 3, 0 # sub ebp, 0x30000
+ jmp *%bp # jmp ebp actually
+
+ASM_PFX(gcSmmInitSize): .word . - ASM_PFX(gcSmmInitTemplate)
+
+
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ pushl %eax
+ movl ASM_PFX(mRebasedFlag), %eax
+ movb $1, (%eax)
+ popl %eax
+ jmp *ASM_PFX(mSmmRelocationOriginalAddress)
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.asm
new file mode 100644
index 0000000000..1eed4ba2bb
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmInit.asm
@@ -0,0 +1,104 @@
+;; @file
+; Functions for relocating SMBASE's for all processors
+;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+ .686p
+ .xmm
+ .model flat,C
+
+SmmInitHandler PROTO C
+
+EXTERNDEF C gSmmCr0:DWORD
+EXTERNDEF C gSmmCr3:DWORD
+EXTERNDEF C gSmmCr4:DWORD
+EXTERNDEF C gcSmmInitTemplate:BYTE
+EXTERNDEF C gcSmmInitSize:WORD
+EXTERNDEF C gSmmJmpAddr:QWORD
+EXTERNDEF C mRebasedFlag:PTR BYTE
+EXTERNDEF C mSmmRelocationOriginalAddress:DWORD
+EXTERNDEF C gSmmInitStack:DWORD
+
+ .data
+
+NullSeg DQ 0 ; reserved by architecture
+DataSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 93h
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+CodeSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+GDT_SIZE = $ - offset NullSeg
+
+ .code
+
+GdtDesc LABEL FWORD
+ DW GDT_SIZE
+ DD offset NullSeg
+
+SmmStartup PROC
+ DB 66h, 0b8h
+gSmmCr3 DD ?
+ mov cr3, eax
+ DB 67h, 66h
+ lgdt fword ptr cs:[ebp + (offset GdtDesc - SmmStartup)]
+ DB 66h, 0b8h
+gSmmCr4 DD ?
+ mov cr4, eax
+ DB 66h, 0b8h
+gSmmCr0 DD ?
+ DB 0bfh, 8, 0 ; mov di, 8
+ mov cr0, eax
+ DB 66h, 0eah ; jmp far [ptr48]
+gSmmJmpAddr LABEL QWORD
+ DD @32bit
+ DW 10h
+@32bit:
+ mov ds, edi
+ mov es, edi
+ mov fs, edi
+ mov gs, edi
+ mov ss, edi
+ DB 0bch ; mov esp, imm32
+gSmmInitStack DD ?
+ call SmmInitHandler
+ rsm
+SmmStartup ENDP
+
+gcSmmInitTemplate LABEL BYTE
+
+_SmmInitTemplate PROC
+ DB 66h
+ mov ebp, SmmStartup
+ DB 66h, 81h, 0edh, 00h, 00h, 03h, 00 ; sub ebp, 30000h
+ jmp bp ; jmp ebp actually
+_SmmInitTemplate ENDP
+
+gcSmmInitSize DW $ - gcSmmInitTemplate
+
+SmmRelocationSemaphoreComplete PROC
+ push eax
+ mov eax, mRebasedFlag
+ mov byte ptr [eax], 1
+ pop eax
+ jmp [mSmmRelocationOriginalAddress]
+SmmRelocationSemaphoreComplete ENDP
+ END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
new file mode 100644
index 0000000000..fbca8c4afe
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -0,0 +1,66 @@
+/** @file
+ IA-32 processor specific functions to enable SMM profile.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (0);
+
+ return ;
+}
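+
+//
+// Note: Gen4GPageTable (0) is expected to build an identity-mapped page table
+// covering the first 4 GB; the address stored in SmmS3Cr3 is presumably loaded
+// into CR3 by the S3 resume code before SMM code runs.
+//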
+
+/**
+  Allocate pages for creating 4KB pages from a 2MB page when a page fault happens.
+ 32-bit firmware does not need it.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+}
+
+/**
+  Update the page table to map the memory correctly, so that the instruction
+  which caused the page fault can execute successfully. It also saves the
+  original page table, to be restored in the single-step exception.
+  32-bit firmware does not need it.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+}
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
new file mode 100644
index 0000000000..e41949563c
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
@@ -0,0 +1,98 @@
+/** @file
+ IA-32 processor specific header file.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT32 BTSBufferBase;
+ UINT32 BTSIndex;
+ UINT32 BTSAbsoluteMaximum;
+ UINT32 BTSInterruptThreshold;
+ UINT32 PEBSBufferBase;
+ UINT32 PEBSIndex;
+ UINT32 PEBSAbsoluteMaximum;
+ UINT32 PEBSInterruptThreshold;
+ UINT32 PEBSCounterReset[4];
+ UINT32 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT32 LastBranchFrom;
+ UINT32 LastBranchTo;
+ UINT32 Rsvd0 : 4;
+ UINT32 BranchPredicted : 1;
+ UINT32 Rsvd1 : 27;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT32 Eflags;
+ UINT32 LinearIP;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+ UINT32 Esi;
+ UINT32 Edi;
+ UINT32 Ebp;
+ UINT32 Esp;
+} PEBS_RECORD;
+
+#pragma pack ()
+
+#define PHYSICAL_ADDRESS_MASK ((1ull << 32) - SIZE_4KB)
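+// 0xFFFFF000: masks an address to its 4 KB page frame (bits 31:12) within
+// the 32-bit physical address space.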
+
+/**
+  Update the page table to map the memory correctly, so that the instruction
+  which caused the page fault can execute successfully. It also saves the
+  original page table, to be restored in the single-step exception.
+  32-bit firmware does not need it.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+  Allocate pages for creating 4KB pages from a 2MB page when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/MpService.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/MpService.c
new file mode 100644
index 0000000000..76fd20dc1b
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/MpService.c
@@ -0,0 +1,1759 @@
+/** @file
+  SMM MP service implementation
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+//
+// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
+//
+UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
+UINT64 gPhyMask;
+SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
+BOOLEAN mExceptionHandlerReady = FALSE;
+SPIN_LOCK mIdtTableLock;
+
+/**
+ Performs an atomic compare exchange operation to get semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param[in, out] Sem IN: 32-bit unsigned integer
+ OUT: original integer - 1
+  @return Original integer - 1
+
+**/
+UINT32
+WaitForSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (Value == 0 ||
+ InterlockedCompareExchange32 (
+ (UINT32*)Sem,
+ Value,
+ Value - 1
+ ) != Value);
+ return Value - 1;
+}
+
+/**
+ Performs an atomic compare exchange operation to release semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param[in, out] Sem IN: 32-bit unsigned integer
+ OUT: original integer + 1
+  @return Original integer + 1
+
+**/
+UINT32
+ReleaseSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (Value + 1 != 0 &&
+ InterlockedCompareExchange32 (
+ (UINT32*)Sem,
+ Value,
+ Value + 1
+ ) != Value);
+ return Value + 1;
+}
+
+/**
+ Performs an atomic compare exchange operation to lock semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param[in, out] Sem IN: 32-bit unsigned integer
+ OUT: -1
+  @return Original integer
+
+**/
+UINT32
+LockdownSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (InterlockedCompareExchange32 (
+ (UINT32*)Sem,
+ Value, (UINT32)-1
+ ) != Value);
+ return Value;
+}
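
The three routines above build a counting semaphore out of nothing but compare-exchange loops: a waiter spins until the count is nonzero and then claims one unit, a releaser increments the count unless it has been pinned, and LockdownSemaphore() pins the count at -1 so no further check-ins can succeed. A minimal standalone sketch of the same decrement-if-nonzero pattern, using C11 atomics in place of BaseLib's InterlockedCompareExchange32 (illustrative only, not part of this module):

  #include <stdatomic.h>
  #include <stdint.h>

  /* Sketch of the WaitForSemaphore() pattern: spin until the count is
     nonzero, then atomically decrement it by one. */
  uint32_t
  SketchWaitForSemaphore (_Atomic uint32_t *Sem)
  {
    uint32_t Value;

    do {
      Value = atomic_load (Sem);
      /* Retry if the count is zero, or if another CPU changed it between
         the load and the compare-exchange. */
    } while (Value == 0 ||
             !atomic_compare_exchange_weak (Sem, &Value, Value - 1));
    return Value - 1;
  }
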
+
+/**
+ Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
+
+ @param[in] NumberOfAPs Number of APs to wait for
+
+**/
+VOID
+WaitForAllAPs (
+ IN UINTN NumberOfAPs
+ )
+{
+ UINTN BspIndex;
+
+ BspIndex = mSmmMpSyncData->BspIndex;
+ while (NumberOfAPs-- > 0) {
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+}
+
+/**
+ Performs an atomic compare exchange operation to release semaphore
+ for each AP.
+
+**/
+VOID
+ReleaseAllAPs (
+ VOID
+ )
+{
+ UINTN Index;
+ UINTN BspIndex;
+
+ BspIndex = mSmmMpSyncData->BspIndex;
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);
+ }
+ }
+}
+
+/**
+ Checks if all CPUs (with certain exceptions) have checked in for this SMI run
+
+ NOTE: for Haswell's exception checking, only single-socket CPUs are supported
+
+ @param Exceptions CPU Arrival exception flags.
+
+ @retval TRUE if all CPUs have checked in.
+ @retval FALSE if at least one Normal AP hasn't checked in.
+
+**/
+BOOLEAN
+AllCpusInSmmWithExceptions (
+ SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
+ )
+{
+ UINTN Index;
+ SMM_CPU_SYNC_FEATURE *SyncFeature;
+ SMM_CPU_SYNC_FEATURE *SmmSyncFeatures;
+ UINT64 LogProcEnBit;
+ SMM_CPU_DATA_BLOCK *CpuData;
+ EFI_PROCESSOR_INFORMATION *ProcessorInfo;
+
+ ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ if (mSmmMpSyncData->Counter == mNumberOfCpus) {
+ return TRUE;
+ }
+
+ CpuData = mSmmMpSyncData->CpuData;
+ ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
+ SmmSyncFeatures = gSmmCpuPrivate->SmmSyncFeature;
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+ SyncFeature = &(SmmSyncFeatures[Index]);
+ LogProcEnBit = SyncFeature->HaswellLogProcEnBit;
+ if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SyncFeature->DelayIndicationSupported && (SmmReadReg64 (Index, SmmRegSmmDelayed) & LogProcEnBit)) {
+ continue;
+ }
+ if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SyncFeature->BlockIndicationSupported && (SmmReadReg64 (Index, SmmRegSmmBlocked) & LogProcEnBit)) {
+ continue;
+ }
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/**
+ Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode
+ code before entering SMM, except for SMI-disabled APs.
+
+**/
+VOID
+SmmWaitForApArrival (
+ VOID
+ )
+{
+ UINT64 Timer;
+ UINTN Index;
+
+ ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ //
+ // Platform implementor should choose a timeout value appropriately:
+ // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
+ // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
+ // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
+ // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
+ // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
+ // SMI IPI), because with a buffered SMI, any CPU will enter SMM immediately after it is brought out of the blocked state.
+ // - The timeout value must be longer than the longest possible IO operation in the system.
+ // (To confirm: is a slow I/O operation reported as an SMM Delayed status on Haswell?)
+ //
+
+ //
+ // Sync with APs 1st timeout
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
+ ) {
+ CpuPause ();
+ }
+
+ //
+ // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
+ // because:
+ // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
+ // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
+ // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires there are no APs doing normal mode
+ // work while SMI handling is on-going.
+ // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
+ // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
+ // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
+ // mode work while SMI handling is on-going.
+ // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
+ // - In traditional flow, SMI disabling is discouraged.
+ // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
+ // In both cases, adding SMI-disabling checking code increases overhead.
+ //
+ if (mSmmMpSyncData->Counter < mNumberOfCpus) {
+ //
+ // Send SMI IPIs to bring outside processors in
+ //
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
+ }
+ }
+
+ //
+ // Sync with APs 2nd timeout.
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
+ ) {
+ CpuPause ();
+ }
+ }
+
+ return;
+}
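
Note that even after both rounds, arrival is not guaranteed: a delayed or blocked CPU may merely have an SMI pended against it. Code that iterates over CPUs after this wait therefore keys off the per-CPU Present flag, along the lines of this sketch (the loop body is illustrative only):

  UINTN Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!mSmmMpSyncData->CpuData[Index].Present) {
      //
      // This CPU never checked in for this SMI run; do not wait on it.
      //
      continue;
    }
    //
    // ... per-CPU work for CPUs that did arrive ...
    //
  }
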
+
+/**
+ Replace OS MTRRs with SMI MTRRs.
+
+ @param[in] CpuIndex Processor Index
+
+**/
+VOID
+ReplaceOSMtrrs (
+ IN UINTN CpuIndex
+ )
+{
+ PROCESSOR_SMM_DESCRIPTOR *Psd;
+ UINT64 *SmiMtrrs;
+ MTRR_SETTINGS *BiosMtrr;
+
+ Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
+ SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;
+
+ //
+ // Replace all MTRR registers
+ //
+ BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
+ MtrrSetAllMtrrs(BiosMtrr);
+}
+
+/**
+ SMI handler for BSP.
+
+ @param[in] CpuIndex BSP processor Index
+ @param[in] SyncMode SMM MP sync mode
+
+**/
+VOID
+BSPHandler (
+ IN UINTN CpuIndex,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ UINTN Index;
+ MTRR_SETTINGS Mtrrs;
+ UINTN ApCount;
+ BOOLEAN ClearTopLevelSmiResult;
+ UINTN PresentCount;
+
+ ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
+ ApCount = 0;
+
+ //
+ // Flag BSP's presence
+ //
+ mSmmMpSyncData->InsideSmm = TRUE;
+
+ //
+ // Initialize Debug Agent to start source level debug in BSP handler
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
+
+ //
+ // Mark this processor's presence
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
+
+ //
+ // Clear platform top level SMI status bit before calling SMI handlers. If
+ // we cleared it after SMI handlers are run, we would miss the SMI that
+ // occurs after SMI handlers are done and before SMI status bit is cleared.
+ //
+ ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
+ ASSERT (ClearTopLevelSmiResult == TRUE);
+
+ //
+ // Set running processor index
+ //
+ gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
+
+ //
+ // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
+ // To-Do: make NeedConfigureMtrrs a PCD?
+ //
+ if (SyncMode == SmmCpuSyncModeTradition || NeedConfigureMtrrs()) {
+
+ //
+ // Wait for APs to arrive
+ //
+ SmmWaitForApArrival();
+
+ //
+ // Lock the counter down and retrieve the number of APs
+ //
+ mSmmMpSyncData->AllCpusInSync = TRUE;
+ ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
+
+ //
+ // Wait for all APs to get ready for programming MTRRs
+ //
+ WaitForAllAPs (ApCount);
+
+ if (NeedConfigureMtrrs()) {
+ //
+ // Signal all APs it's time for backup MTRRs
+ //
+ ReleaseAllAPs ();
+
+ //
+ // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
+ // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
+ // to a large enough value to avoid this situation.
+ // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
+ // We do the backup first and then set MTRR to avoid race condition for threads
+ // in the same core.
+ //
+ MtrrGetAllMtrrs(&Mtrrs);
+
+ //
+ // Wait for all APs to complete their MTRR saving
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Let all processors program SMM MTRRs together
+ //
+ ReleaseAllAPs ();
+
+ //
+ // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
+ // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
+ // to a large enough value to avoid this situation.
+ //
+ ReplaceOSMtrrs (CpuIndex);
+
+ //
+ // Wait for all APs to complete their MTRR programming
+ //
+ WaitForAllAPs (ApCount);
+ }
+ }
+
+ //
+ // The BUSY lock is initialized to Acquired state
+ //
+ AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+
+ //
+ // Special handling for the S3 boot path.
+ //
+ RestoreSmmConfigurationInS3 ();
+
+ //
+ // Invoke SMM Foundation EntryPoint with the processor information context.
+ //
+ gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
+
+ //
+ // Make sure all APs have completed their pending non-blocking tasks
+ //
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
+ AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
+ ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
+ }
+ }
+
+ //
+ // Perform the remaining tasks
+ //
+ PerformRemainingTasks ();
+
+ //
+ // Set the EOS bit before SMI resume.
+ //
+ // BUGBUG: The following is a chipset specific action from a CPU module.
+ //
+ ClearSmi();
+
+ //
+ // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
+ // make those APs exit SMI synchronously. APs which arrive later will be excluded and
+ // will run through freely.
+ //
+ if (SyncMode != SmmCpuSyncModeTradition && !NeedConfigureMtrrs()) {
+
+ //
+ // Lock the counter down and retrieve the number of APs
+ //
+ mSmmMpSyncData->AllCpusInSync = TRUE;
+ ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
+ //
+ // Make sure all APs have their Present flag set
+ //
+ while (TRUE) {
+ PresentCount = 0;
+ for (Index = mMaxNumberOfCpus; Index-- > 0;) {
+ if (mSmmMpSyncData->CpuData[Index].Present) {
+ PresentCount++;
+ }
+ }
+ if (PresentCount > ApCount) {
+ break;
+ }
+ }
+ }
+
+ //
+ // Notify all APs to exit
+ //
+ mSmmMpSyncData->InsideSmm = FALSE;
+ ReleaseAllAPs ();
+
+ //
+ // Wait for all APs to complete their pending tasks
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Set the effective Sync Mode for next SMI run.
+ // Note this setting must be performed in the window where no APs can check in.
+ //
+ mSmmMpSyncData->EffectiveSyncMode = mSmmMpSyncData->SyncModeToBeSet;
+
+ if (NeedConfigureMtrrs()) {
+ //
+ // Signal APs to restore MTRRs
+ //
+ ReleaseAllAPs ();
+
+ //
+ // Restore OS MTRRs
+ //
+ MtrrSetAllMtrrs(&Mtrrs);
+
+ //
+ // Wait for all APs to complete MTRR programming
+ //
+ WaitForAllAPs (ApCount);
+ }
+
+ //
+ // Stop source level debug in BSP handler, the code below will not be
+ // debugged.
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
+
+ //
+ // Signal APs to Reset states/semaphore for this processor
+ //
+ ReleaseAllAPs ();
+
+ //
+ // Perform pending operations for hot-plug
+ //
+ SmmCpuUpdate ();
+
+ //
+ // Clear the Present flag of BSP
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
+
+ //
+ // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
+ // WaitForAllAps does not depend on the Present flag.
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Reset BspIndex to -1, meaning BSP has not been elected.
+ //
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ mSmmMpSyncData->BspIndex = (UINT32)-1;
+ }
+
+ //
+ // Allow APs to check in from this point on
+ //
+ mSmmMpSyncData->Counter = 0;
+ mSmmMpSyncData->AllCpusInSync = FALSE;
+}
+
+/**
+ SMI handler for AP.
+
+ @param[in] CpuIndex AP processor Index.
+ @param[in] ValidSmi Indicates whether the current SMI is a valid SMI.
+ @param[in] SyncMode SMM MP sync mode.
+
+**/
+VOID
+APHandler (
+ IN UINTN CpuIndex,
+ IN BOOLEAN ValidSmi,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ UINT64 Timer;
+ UINTN BspIndex;
+ MTRR_SETTINGS Mtrrs;
+
+ //
+ // Timeout BSP
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !mSmmMpSyncData->InsideSmm;
+ ) {
+ CpuPause ();
+ }
+
+ if (!mSmmMpSyncData->InsideSmm) {
+ //
+ // BSP timeout in the first round
+ //
+ if (mSmmMpSyncData->BspIndex != -1) {
+ //
+ // BSP Index is known
+ //
+ BspIndex = mSmmMpSyncData->BspIndex;
+ ASSERT (CpuIndex != BspIndex);
+
+ //
+ // Send SMI IPI to bring BSP in
+ //
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
+
+ //
+ // Now clock BSP for the 2nd time
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !mSmmMpSyncData->InsideSmm;
+ ) {
+ CpuPause ();
+ }
+
+ if (!mSmmMpSyncData->InsideSmm) {
+ //
+ // Give up since BSP is unable to enter SMM
+ // and signal the completion of this AP
+ WaitForSemaphore (&mSmmMpSyncData->Counter);
+ return;
+ }
+ } else {
+ //
+ // Don't know BSP index. Give up without sending IPI to BSP.
+ //
+ WaitForSemaphore (&mSmmMpSyncData->Counter);
+ return;
+ }
+ }
+
+ //
+ // BSP is available
+ //
+ BspIndex = mSmmMpSyncData->BspIndex;
+ ASSERT (CpuIndex != BspIndex);
+
+ //
+ // Mark this processor's presence
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;
+
+ if (SyncMode == SmmCpuSyncModeTradition || NeedConfigureMtrrs()) {
+ //
+ // Notify BSP of arrival at this point
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+
+ if (NeedConfigureMtrrs()) {
+ //
+ // Wait for the signal from BSP to backup MTRRs
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Backup OS MTRRs
+ //
+ MtrrGetAllMtrrs(&Mtrrs);
+
+ //
+ // Signal BSP the completion of this AP
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for BSP's signal to program MTRRs
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Replace OS MTRRs with SMI MTRRs
+ //
+ ReplaceOSMtrrs (CpuIndex);
+
+ //
+ // Signal BSP the completion of this AP
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+
+ while (TRUE) {
+ //
+ // Wait for something to happen
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Check if BSP wants to exit SMM
+ //
+ if (!mSmmMpSyncData->InsideSmm) {
+ break;
+ }
+
+ //
+ // BUSY should be acquired by SmmStartupThisAp()
+ //
+ ASSERT (
+ !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
+ );
+
+ //
+ // Invoke the scheduled procedure
+ //
+ (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
+ (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
+ );
+
+ //
+ // Release BUSY
+ //
+ ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ if (NeedConfigureMtrrs()) {
+ //
+ // Notify BSP the readiness of this AP to program MTRRs
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for the signal from BSP to program MTRRs
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Restore OS MTRRs
+ //
+ MtrrSetAllMtrrs(&Mtrrs);
+ }
+
+ //
+ // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for the signal from BSP to Reset states/semaphore for this processor
+ //
+ WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Reset states/semaphore for this processor
+ //
+ mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;
+
+ //
+ // Notify BSP the readiness of this AP to exit SMM
+ //
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
+
+}
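
When NeedConfigureMtrrs() is TRUE, every Run-semaphore operation in APHandler() pairs with one in BSPHandler(); if either side skips a step, the other spins forever in WaitForSemaphore(). The pairing for the MTRR phase, summarized from the two handlers above:

  //
  // BSP (BSPHandler)                     AP (APHandler)
  // ----------------                     --------------
  // WaitForAllAPs (ApCount)       <---   ReleaseSemaphore (BSP Run)   arrival
  // ReleaseAllAPs ()              --->   WaitForSemaphore (own Run)   "back up MTRRs"
  // MtrrGetAllMtrrs (&Mtrrs)             MtrrGetAllMtrrs (&Mtrrs)
  // WaitForAllAPs (ApCount)       <---   ReleaseSemaphore (BSP Run)
  // ReleaseAllAPs ()              --->   WaitForSemaphore (own Run)   "program SMM MTRRs"
  // ReplaceOSMtrrs (CpuIndex)            ReplaceOSMtrrs (CpuIndex)
  // WaitForAllAPs (ApCount)       <---   ReleaseSemaphore (BSP Run)
  //
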
+
+/**
+ Create 4G PageTable in SMRAM.
+
+ @param[in] ExtraPages Number of extra pages to allocate in addition to those used for the 4G page table
+ @return PageTable Address
+
+**/
+UINT32
+Gen4GPageTable (
+ IN UINTN ExtraPages
+ )
+{
+ VOID *PageTable;
+ UINTN Index;
+ UINT64 *Pte;
+ UINTN PagesNeeded;
+ UINTN Low2MBoundary;
+ UINTN High2MBoundary;
+ UINTN Pages;
+ UINTN GuardPage;
+ UINT64 *Pdpte;
+ UINTN PageIndex;
+ UINTN PageAddress;
+
+ Low2MBoundary = 0;
+ High2MBoundary = 0;
+ PagesNeeded = 0;
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Add one more page for known good stack, then find the lower 2MB aligned address.
+ //
+ Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
+ //
+ // Add two more pages for known good stack and stack guard page,
+ // then find the lower 2MB aligned address.
+ //
+ High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
+ PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
+ }
+ //
+ // Allocate the page table
+ //
+ PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded);
+ ASSERT (PageTable != NULL);
+
+ PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
+ Pte = (UINT64*)PageTable;
+
+ //
+ // Zero out all page table entries first
+ //
+ ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
+
+ //
+ // Set Page Directory Pointers
+ //
+ for (Index = 0; Index < 4; Index++) {
+ Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P;
+ }
+ Pte += EFI_PAGE_SIZE / sizeof (*Pte);
+
+ //
+ // Fill in Page Directory Entries
+ //
+ for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
+ Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P;
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
+ GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
+ Pdpte = (UINT64*)PageTable;
+ for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
+ Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P;
+ //
+ // Fill in Page Table Entries
+ //
+ Pte = (UINT64*)Pages;
+ PageAddress = PageIndex;
+ for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
+ if (PageAddress == GuardPage) {
+ //
+ // Mark the guard page as non-present
+ //
+ Pte[Index] = PageAddress;
+ GuardPage += mSmmStackSize;
+ if (GuardPage > mSmmStackArrayEnd) {
+ GuardPage = 0;
+ }
+ } else {
+ Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P;
+ }
+ PageAddress += EFI_PAGE_SIZE;
+ }
+ Pages += EFI_PAGE_SIZE;
+ }
+ }
+
+ return (UINT32)(UINTN)PageTable;
+}
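
Gen4GPageTable() lays out the allocation as ExtraPages scratch pages, followed by one PDPT page, four page-directory pages mapping 0-4GB with 2MB pages, and PagesNeeded page-table pages for the stack-guard splits. The index arithmetic it uses is the standard PAE decomposition; a worked example for a hypothetical address:

  UINTN Address    = 0x12345000;
  UINTN PdpteIndex = BitFieldRead32 ((UINT32)Address, 30, 31); // 0     - which 1GB page directory
  UINTN PdeIndex   = BitFieldRead32 ((UINT32)Address, 21, 29); // 0x91  - which 2MB directory entry
  UINTN PteIndex   = BitFieldRead32 ((UINT32)Address, 12, 20); // 0x145 - which 4KB table entry
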
+
+/**
+ Set memory cacheability.
+
+ @param[in] PageTable PageTable Address
+ @param[in] Address Memory Address to change cache ability
+ @param[in] Cacheability Cacheability value to set
+
+**/
+VOID
+SetCacheability (
+ IN UINT64 *PageTable,
+ IN UINTN Address,
+ IN UINT8 Cacheability
+ )
+{
+ UINTN PTIndex;
+ VOID *NewPageTableAddress;
+ UINT64 *NewPageTable;
+ UINTN Index;
+
+ ASSERT ((Address & EFI_PAGE_MASK) == 0);
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+ }
+
+ PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+
+ //
+ // A perfect implementation should check the original cacheability with the
+ // one being set, and break a 2M page entry into pieces only when they
+ // disagree.
+ //
+ PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ //
+ // Allocate a page from SMRAM
+ //
+ NewPageTableAddress = AllocatePages (1);
+ ASSERT (NewPageTableAddress != NULL);
+
+ NewPageTable = (UINT64 *)NewPageTableAddress;
+
+ for (Index = 0; Index < 0x200; Index++) {
+ NewPageTable[Index] = PageTable[PTIndex];
+ if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
+ NewPageTable[Index] &= ~IA32_PG_PAT_2M;
+ NewPageTable[Index] |= IA32_PG_PAT_4K;
+ }
+ NewPageTable[Index] |= Index << EFI_PAGE_SHIFT;
+ }
+
+ PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;
+ }
+
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+
+ PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
+ ASSERT (PageTable[PTIndex] & IA32_PG_P);
+ PageTable[PTIndex] &= ~(IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT);
+ PageTable[PTIndex] |= Cacheability;
+}
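
The shift amounts in SetCacheability() correspond one-to-one to the paging levels; the 39-bit PML4 level is walked only when UINTN is 64-bit (the sizeof check at the top of the function). As a reference sketch:

  //
  // Page-table walk indices used by SetCacheability():
  //   PML4 index = (Address >> 39) & 0x1FF   (x64 only)
  //   PDPT index = (Address >> 30) & 0x1FF
  //   PD   index = (Address >> 21) & 0x1FF   (2MB entry split into 4KB entries here if needed)
  //   PT   index = (Address >> 12) & 0x1FF
  //
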
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuIndex not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
+ CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
+ !mSmmMpSyncData->CpuData[CpuIndex].Present ||
+ gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
+ !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
+ mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
+ AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+ return EFI_SUCCESS;
+}
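
A typical caller runs on the BSP inside a registered SMI handler and schedules work on a single AP. A minimal usage sketch; ApFlushCache and the target index are hypothetical, not part of this module:

  VOID
  EFIAPI
  ApFlushCache (
    IN OUT VOID *Buffer
    )
  {
    AsmWbinvd (); // Runs on the target AP, inside SMM
  }

  //
  // ... then, inside an SMI handler running on the BSP:
  //
  EFI_STATUS Status;

  Status = SmmStartupThisAp (ApFlushCache, 1, NULL);
  if (EFI_ERROR (Status)) {
    //
    // Index 1 is the BSP, is absent from this SMI run, or is busy.
    //
  }
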
+
+/**
+ Schedule a procedure to run on the specified CPU in a blocking fashion.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuIndex not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
+ CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
+ !mSmmMpSyncData->CpuData[CpuIndex].Present ||
+ gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
+ !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
+ mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
+ ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ return EFI_SUCCESS;
+}
+
+/**
+ C function for SMI entry, each processor comes here upon SMI trigger.
+
+ @param[in] CpuIndex Cpu Index
+
+**/
+VOID
+EFIAPI
+SmiRendezvous (
+ IN UINTN CpuIndex
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN ValidSmi;
+ BOOLEAN IsBsp;
+ BOOLEAN BspInProgress;
+ UINTN Index;
+ UINTN Cr2;
+
+ //
+ // Save Cr2 because Page Fault exception in SMM may override its value
+ //
+ Cr2 = AsmReadCr2 ();
+
+ //
+ // Enable the exception table by loading the IDTR
+ //
+ AsmWriteIdtr (&gcSmiIdtr);
+ if (!mExceptionHandlerReady) {
+ //
+ // If the default CPU exception handlers are not ready yet
+ //
+ AcquireSpinLock (&mIdtTableLock);
+ if (!mExceptionHandlerReady) {
+ //
+ // Update the IDT entry to handle Page Fault exception
+ //
+ CopyMem (
+ ((IA32_IDT_GATE_DESCRIPTOR *) (gcSmiIdtr.Base + sizeof (IA32_IDT_GATE_DESCRIPTOR) * 14)),
+ &gSavedPageFaultIdtEntry,
+ sizeof (IA32_IDT_GATE_DESCRIPTOR));
+ //
+ // Update the IDT entry to handle the debug exception for the SMM profile feature
+ //
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ CopyMem (
+ ((IA32_IDT_GATE_DESCRIPTOR *) (gcSmiIdtr.Base + sizeof (IA32_IDT_GATE_DESCRIPTOR) * 1)),
+ &gSavedDebugExceptionIdtEntry,
+ sizeof (IA32_IDT_GATE_DESCRIPTOR));
+ }
+ //
+ // Set CPU exception handlers ready flag
+ //
+ mExceptionHandlerReady = TRUE;
+ }
+ ReleaseSpinLock (&mIdtTableLock);
+ }
+
+ //
+ // Enable XMM instructions & exceptions
+ //
+ AsmWriteCr4 (AsmReadCr4 () | 0x600);
+
+ //
+ // Perform CPU specific entry hooks
+ //
+ SmmRendezvousEntry (CpuIndex);
+
+ //
+ // Determine if this is a valid Smi
+ //
+ ValidSmi = PlatformValidSmi();
+
+ //
+ // Determine if BSP has been already in progress. Note this must be checked after
+ // ValidSmi because BSP may clear a valid SMI source after checking in.
+ //
+ BspInProgress = mSmmMpSyncData->InsideSmm;
+
+ if (!BspInProgress && !ValidSmi) {
+ //
+ // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
+ // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
+ // status had been cleared by BSP and an existing SMI run has almost ended. (Note
+ // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
+ // is nothing we need to do.
+ //
+ goto Exit;
+ } else {
+ //
+ // Signal presence of this processor
+ //
+ if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
+ //
+ // BSP has already ended the synchronization, so QUIT!!!
+ //
+
+ //
+ // Wait for BSP's signal to finish SMI
+ //
+ while (mSmmMpSyncData->AllCpusInSync) {
+ CpuPause ();
+ }
+ goto Exit;
+ } else {
+
+ //
+ // The BUSY lock is initialized to Released state.
+ // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
+ // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
+ // after AP's present flag is detected.
+ //
+ InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ ActivateSmmProfile (CpuIndex);
+ }
+
+ if (BspInProgress) {
+ //
+ // BSP has been elected. Follow AP path, regardless of ValidSmi flag
+ // as BSP may have cleared the SMI status
+ //
+ APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
+ } else {
+ //
+ // We have a valid SMI
+ //
+
+ //
+ // Elect BSP
+ //
+ IsBsp = FALSE;
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
+ //
+ // Call platform hook to do BSP election
+ //
+ Status = PlatformSmmBspElection (&IsBsp);
+ if (Status == EFI_SUCCESS) {
+ //
+ // The platform hook determined the BSP successfully
+ //
+ if (IsBsp) {
+ mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
+ }
+ } else {
+ //
+ // The platform hook failed to determine the BSP; use the default election method
+ //
+ InterlockedCompareExchange32 (
+ (UINT32*)&mSmmMpSyncData->BspIndex,
+ (UINT32)-1,
+ (UINT32)CpuIndex
+ );
+ }
+ }
+ }
+
+ //
+ // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
+ //
+ if (mSmmMpSyncData->BspIndex == CpuIndex) {
+
+ //
+ // Clear last request for SwitchBsp.
+ //
+ if (mSmmMpSyncData->SwitchBsp) {
+ mSmmMpSyncData->SwitchBsp = FALSE;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ mSmmMpSyncData->CandidateBsp[Index] = FALSE;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfileRecordSmiNum ();
+ }
+
+ //
+ // BSP Handler is always called with a ValidSmi == TRUE
+ //
+ BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
+
+ } else {
+ APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
+ }
+ }
+
+ ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
+
+ //
+ // Wait for BSP's signal to exit SMI
+ //
+ while (mSmmMpSyncData->AllCpusInSync) {
+ CpuPause ();
+ }
+ }
+
+Exit:
+ SmmRendezvousExit (CpuIndex);
+ //
+ // Restore Cr2
+ //
+ AsmWriteCr2 (Cr2);
+}
+
+/**
+ Initialize uncacheable data.
+
+**/
+VOID
+EFIAPI
+InitializeMpSyncData (
+ VOID
+ )
+{
+ if (mSmmMpSyncData != NULL) {
+ ZeroMem (mSmmMpSyncData, sizeof (*mSmmMpSyncData));
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ //
+ // Enable BSP election by setting BspIndex to -1
+ //
+ mSmmMpSyncData->BspIndex = (UINT32)-1;
+ }
+ mSmmMpSyncData->SyncModeToBeSet = SmmCpuSyncModeTradition;
+ mSmmMpSyncData->EffectiveSyncMode = SmmCpuSyncModeTradition;
+ }
+}
+
+/**
+ Initialize global data for MP synchronization.
+
+ @param[in] ImageHandle File Image Handle.
+ @param[in] Stacks Base address of SMI stack buffer for all processors.
+ @param[in] StackSize Stack size for each processor in SMM.
+
+**/
+VOID
+InitializeMpServiceData (
+ IN EFI_HANDLE ImageHandle,
+ IN VOID *Stacks,
+ IN UINTN StackSize
+ )
+{
+ UINTN Index;
+ MTRR_SETTINGS *Mtrr;
+ PROCESSOR_SMM_DESCRIPTOR *Psd;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+
+ //
+ // Initialize physical address mask
+ // NOTE: Physical memory above virtual address limit is not supported !!!
+ //
+ AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
+ gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
+ gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
+
+ InitializeSmmMtrrManager ();
+ //
+ // Create page tables
+ //
+ gSmiCr3 = SmmInitPageTable ();
+
+ //
+ // Initialize spin lock for setting up CPU exception handler
+ //
+ InitializeSpinLock (&mIdtTableLock);
+
+ //
+ // Initialize SMM startup code & PROCESSOR_SMM_DESCRIPTOR structures
+ //
+ gSmiStack = (UINTN)Stacks + StackSize - sizeof (UINTN);
+
+ //
+ // The Smi Handler of CPU[i] and PSD of CPU[i+x] are in the same SMM_CPU_INTERVAL,
+ // and they cannot overlap.
+ //
+ ASSERT (gcSmiHandlerSize + 0x10000 - SMM_PSD_OFFSET < SMM_CPU_INTERVAL);
+ ASSERT (SMM_HANDLER_OFFSET % SMM_CPU_INTERVAL == 0);
+
+ GdtTssTables = NULL;
+ GdtTssTableSize = 0;
+ //
+ // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+ // on each SMI entry.
+ //
+ if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ ASSERT (GdtTssTables != NULL);
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTssTableSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Setup top of known good stack as IST1 for each processor.
+ //
+ *(UINTN *)(GdtTssTables + GdtTssTableSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
+ }
+ }
+ } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+
+ //
+ // For IA32 SMM, if the SMM Stack Guard feature is enabled, we use 2 TSSs.
+ // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+ // on each SMI entry.
+ //
+
+ //
+ // Enlarge GDT to contain 2 TSS descriptors
+ //
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
+
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ ASSERT (GdtTssTables != NULL);
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTssTableSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
+ //
+ // Fixup TSS descriptors
+ //
+ TssBase = (UINTN)(GdtTssTables + GdtTssTableSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+
+ TssBase += TSS_SIZE;
+ GdtDescriptor++;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+ //
+ // Fixup TSS segments
+ //
+ // ESP as known good stack
+ //
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = gSmiCr3;
+ }
+ }
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ *(UINTN*)gSmiStack = Index;
+ gSmbase = (UINT32)mCpuHotPlugData.SmBase[Index];
+ CopyMem (
+ (VOID*)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET),
+ (VOID*)gcSmiHandlerTemplate,
+ gcSmiHandlerSize
+ );
+
+ Psd = (PROCESSOR_SMM_DESCRIPTOR*)(VOID*)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
+ CopyMem (Psd, &gcPsd, sizeof (gcPsd));
+ if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {
+ //
+ // For X64 SMM, set GDT to the copy allocated above.
+ //
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTssTableSize * Index);
+ } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above.
+ //
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTssTableSize * Index);
+ Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
+ }
+
+ gSmiStack += StackSize;
+ }
+
+ //
+ // Initialize mSmmMpSyncData
+ //
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (sizeof (*mSmmMpSyncData)));
+ ASSERT (mSmmMpSyncData != NULL);
+ InitializeMpSyncData ();
+
+ if (FeaturePcdGet (PcdCpuSmmUncacheCpuSyncData)) {
+ //
+ // mSmmMpSyncData should reside in uncached RAM
+ //
+ SetCacheability ((UINT64*)(UINTN)gSmiCr3, (UINTN)mSmmMpSyncData, IA32_PG_CD);
+ }
+
+ //
+ // Record current MTRR settings
+ //
+ ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
+ Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
+ MtrrGetAllMtrrs (Mtrr);
+
+}
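
The gPhyMask setup at the top of InitializeMpServiceData() reads CPUID leaf 80000008h, whose EAX[7:0] reports the physical address width, builds a (2^width - 1) mask, then clamps it to 48 bits with the low 12 bits cleared so it can be applied directly to page-table entries. A worked example, assuming a CPU that reports 36 physical address bits:

  //
  //   LShiftU64 (1, 36) - 1          = 0x0000000FFFFFFFFF
  //   (1ull << 48) - EFI_PAGE_SIZE   = 0x0000FFFFFFFFF000
  //   gPhyMask (AND of the two)      = 0x0000000FFFFFF000
  //
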
+
+/**
+ Register the SMM Foundation entry point.
+
+ @param[in] This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
+ @param[in] SmmEntryPoint SMM Foundation EntryPoint
+
+ @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
+
+**/
+EFI_STATUS
+EFIAPI
+RegisterSmmEntry (
+ IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
+ IN EFI_SMM_ENTRY_POINT SmmEntryPoint
+ )
+{
+ //
+ // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
+ //
+ gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
+ return EFI_SUCCESS;
+}
+
+/**
+ Set SMM synchronization mode starting from the next SMI run.
+
+ @param[in] SyncMode The SMM synchronization mode to be set and effective from the next SMI run.
+
+ @retval EFI_SUCCESS The function completes successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is not valid.
+
+**/
+EFI_STATUS
+SmmSyncSetModeWorker (
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ if (SyncMode >= SmmCpuSyncModeMax) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Sync Mode effective from next SMI run
+ //
+ mSmmMpSyncData->SyncModeToBeSet = SyncMode;
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Get current effective SMM synchronization mode.
+
+ @param[out] SyncMode Output parameter. The current effective SMM synchronization mode.
+
+ @retval EFI_SUCCESS The SMM synchronization mode has been retrieved successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is NULL.
+
+**/
+EFI_STATUS
+SmmSyncGetModeWorker (
+ OUT SMM_CPU_SYNC_MODE *SyncMode
+ )
+{
+ if (SyncMode == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+ *SyncMode = mSmmMpSyncData->EffectiveSyncMode;
+ return EFI_SUCCESS;
+}
+
+/**
+ Given the timeout constraint, wait for all APs to arrive. If this function returns EFI_SUCCESS, then
+ no AP will execute normal mode code before entering SMM, so it is safe to access shared hardware resource
+ between SMM and normal mode. Note if there are SMI disabled APs, this function will return EFI_NOT_READY.
+
+ @retval EFI_SUCCESS No AP will execute normal mode code before entering SMM, so it is safe to access
+ shared hardware resource between SMM and normal mode
+ @retval EFI_NOT_READY One or more CPUs may still execute normal mode code
+ @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+
+**/
+EFI_STATUS
+SmmCpuSyncCheckApArrivalWorker (
+ VOID
+ )
+{
+ ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ if (mSmmMpSyncData->Counter == mNumberOfCpus) {
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Wait for APs to arrive
+ //
+ SmmWaitForApArrival();
+
+ //
+ // If there are CPUs in the SMI Disabled state, we return EFI_NOT_READY because this is not a safe environment for accessing shared
+ // hardware resources between SMM and normal mode.
+ //
+ if (AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED)) {
+ return EFI_SUCCESS;
+ } else {
+ return EFI_NOT_READY;
+ }
+}
+
+/**
+ Given the timeout constraint, wait for all APs to arrive. If this function returns EFI_SUCCESS, then
+ no AP will execute normal mode code before entering SMM, so it is safe to access shared hardware resource
+ between SMM and normal mode. Note if there are SMI disabled APs, this function will return EFI_NOT_READY.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+
+ @retval EFI_SUCCESS No AP will execute normal mode code before entering SMM, so it is safe to access
+ shared hardware resource between SMM and normal mode
+ @retval EFI_NOT_READY One or more CPUs may still execute normal mode code
+ @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSyncCheckApArrival (
+ IN SMM_CPU_SYNC_PROTOCOL *This
+ )
+{
+ return SmmCpuSyncCheckApArrivalWorker();
+}
+
+/**
+ Given the timeout constraint, wait for all APs to arrive, and ensure that if this function returns EFI_SUCCESS, then
+ no AP will execute normal mode code before entering SMM, so it is safe to access shared hardware resource
+ between SMM and normal mode. Note if there are SMI disabled APs, this function will return EFI_NOT_READY.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+
+ @retval EFI_SUCCESS No AP will execute normal mode code before entering SMM, so it is safe to access
+ shared hardware resource between SMM and normal mode
+ @retval EFI_NOT_READY One or more CPUs may still execute normal mode code
+ @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2CheckApArrival (
+ IN SMM_CPU_SYNC2_PROTOCOL *This
+ )
+{
+ return SmmCpuSyncCheckApArrivalWorker();
+}
+
+/**
+ Set SMM synchronization mode starting from the next SMI run.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param[in] SyncMode The SMM synchronization mode to be set and effective from the next SMI run.
+
+ @retval EFI_SUCCESS The function completes successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is not valid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSyncSetMode (
+ IN SMM_CPU_SYNC_PROTOCOL *This,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ return SmmSyncSetModeWorker(SyncMode);
+}
+
+/**
+ Set SMM synchronization mode starting from the next SMI run.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+ @param[in] SyncMode The SMM synchronization mode to be set and effective from the next SMI run.
+
+ @retval EFI_SUCCESS The function completes successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is not valid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2SetMode (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ return SmmSyncSetModeWorker(SyncMode);
+}
+
+/**
+ Get current effective SMM synchronization mode.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param[out] SyncMode Output parameter. The current effective SMM synchronization mode.
+
+ @retval EFI_SUCCESS The SMM synchronization mode has been retrieved successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSyncGetMode (
+ IN SMM_CPU_SYNC_PROTOCOL *This,
+ OUT SMM_CPU_SYNC_MODE *SyncMode
+ )
+{
+ return SmmSyncGetModeWorker(SyncMode);
+}
+
+/**
+ Get current effective SMM synchronization mode.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+ @param[out] SyncMode Output parameter. The current effective SMM synchronization mode.
+
+ @retval EFI_SUCCESS The SMM synchronization mode has been retrieved successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2GetMode (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ OUT SMM_CPU_SYNC_MODE *SyncMode
+ )
+{
+ return SmmSyncGetModeWorker(SyncMode);
+}
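
Outside drivers reach these workers through the protocol instances that this module publishes. A usage sketch for requesting a different sync mode from within SMM; the protocol GUID, the SetMode member name, and the relaxed-mode enumerator are assumptions here, not confirmed by this file:

  SMM_CPU_SYNC_PROTOCOL *SmmCpuSync;
  EFI_STATUS            Status;

  //
  // Locate the SMM CPU Sync protocol and request relaxed AP mode,
  // effective from the next SMI run.
  //
  Status = gSmst->SmmLocateProtocol (&gSmmCpuSyncProtocolGuid, NULL, (VOID **)&SmmCpuSync);
  if (!EFI_ERROR (Status)) {
    Status = SmmCpuSync->SetMode (SmmCpuSync, SmmCpuSyncModeRelaxedAp);
  }
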
+
+/**
+ Checks CPU SMM synchronization state
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+ @param[in] CpuIndex Index of the CPU for which the state is to be retrieved.
+ @param[out] CpuState The state of the CPU
+
+ @retval EFI_SUCCESS The CPU's state was retrieved.
+ @retval EFI_INVALID_PARAMETER CpuIndex is invalid or CpuState is NULL.
+ @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2CheckCpuState (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN UINTN CpuIndex,
+ OUT SMM_CPU_SYNC2_CPU_STATE *CpuState
+ )
+{
+ SMM_CPU_SYNC_FEATURE *SyncFeature;
+ UINT64 LogProcEnBit;
+
+ if (CpuIndex >= mMaxNumberOfCpus) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (CpuState == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ *CpuState = SmmCpuSync2StateNormal;
+ SyncFeature = &(gSmmCpuPrivate->SmmSyncFeature[CpuIndex]);
+ LogProcEnBit = SyncFeature->HaswellLogProcEnBit;
+
+ if (SyncFeature->DelayIndicationSupported && (SmmReadReg64 (CpuIndex, SmmRegSmmDelayed) & LogProcEnBit)) {
+ *CpuState = SmmCpuSync2StateDelayed;
+ } else if (SyncFeature->BlockIndicationSupported && (SmmReadReg64 (CpuIndex, SmmRegSmmBlocked) & LogProcEnBit)) {
+ *CpuState = SmmCpuSync2StateBlocked;
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Change CPU's SMM enabling state.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+ @param[in] CpuIndex Index of the CPU to enable / disable SMM
+ @param[in] Enable If TRUE, enable SMM; if FALSE disable SMM
+
+ @retval EFI_SUCCESS The CPU's SMM enabling state is changed.
+ @retval EFI_INVALID_PARAMETER CpuIndex is invalid.
+ @retval EFI_UNSUPPORTED The CPU does not support dynamically enabling / disabling SMI.
+ @retval EFI_DEVICE_ERROR Error occurred in changing SMM enabling state.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2ChangeSmmEnabling (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN UINTN CpuIndex,
+ IN BOOLEAN Enable
+ )
+{
+ SMM_CPU_SYNC_FEATURE *SyncFeature;
+ UINT64 LogProcEnBit;
+
+ if (CpuIndex >= mMaxNumberOfCpus) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ SyncFeature = &(gSmmCpuPrivate->SmmSyncFeature[CpuIndex]);
+ LogProcEnBit = SyncFeature->HaswellLogProcEnBit;
+
+ return EFI_UNSUPPORTED;
+}
+
+/**
+ Send SMI IPI to a specific CPU.
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+ @param[in] CpuIndex Index of the CPU to send SMI to.
+
+ @retval EFI_SUCCESS The SMI IPI is sent successfully.
+ @retval EFI_INVALID_PARAMETER CpuIndex is invalid.
+ @retval EFI_DEVICE_ERROR Error occurred in sending SMI IPI.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2SendSmi (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN UINTN CpuIndex
+ )
+{
+ if (CpuIndex >= mMaxNumberOfCpus) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId);
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Send SMI IPI to all CPUs excluding self
+
+ @param[in] This A pointer to the SMM_CPU_SYNC2_PROTOCOL instance.
+
+ @retval EFI_SUCCESS The SMI IPI is sent successfully.
+ @retval EFI_DEVICE_ERROR Error occurred in sending SMI IPI.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2SendSmiAllExcludingSelf (
+ IN SMM_CPU_SYNC2_PROTOCOL *This
+ )
+{
+ SendSmiIpiAllExcludingSelf();
+ return EFI_SUCCESS;
+}
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
new file mode 100644
index 0000000000..d4ab88dff8
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -0,0 +1,1596 @@
+/** @file
+ Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+//
+// SMM CPU Private Data structure that contains the SMM Configuration Protocol
+// along with its supporting fields.
+//
+SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
+ SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
+ NULL, // SmmCpuHandle
+ { {0} }, // ProcessorInfo array
+ { SmmCpuNone }, // Operation array
+ { 0 }, // CpuSaveStateSize array
+ { NULL }, // CpuSaveState array
+ { 0 }, // TstateMsr array
+ { {0} }, // SmmSyncFeature array
+ { {0} }, // SmmReservedSmramRegion
+ {
+ SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
+ 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
+ 0, // SmmCoreEntryContext.NumberOfCpus
+ mSmmCpuPrivateData.CpuSaveStateSize, // SmmCoreEntryContext.CpuSaveStateSize
+ mSmmCpuPrivateData.CpuSaveState // SmmCoreEntryContext.CpuSaveState
+ },
+ NULL, // SmmCoreEntry
+ {
+ mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
+ RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
+ },
+};
+
+CPU_HOT_PLUG_DATA mCpuHotPlugData = {
+ CPU_HOT_PLUG_DATA_REVISION_1, // Revision
+ 0, // Array Length of SmBase and APIC ID
+ NULL, // Pointer to APIC ID array
+ NULL, // Pointer to SMBASE array
+ 0, // IEDBase
+ 0, // SmrrBase
+ 0 // SmrrSize
+};
+
+//
+// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
+//
+SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
+
+//
+// SMM Reloc variables
+//
+volatile BOOLEAN mRebased[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+volatile BOOLEAN mIsBsp;
+
+///
+/// Handle for the SMM CPU Protocol
+///
+EFI_HANDLE mSmmCpuHandle = NULL;
+
+///
+/// SMM CPU Protocol instance
+///
+EFI_SMM_CPU_PROTOCOL mSmmCpu = {
+ SmmReadSaveState,
+ SmmWriteSaveState
+};
+
+EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
+
+///
+/// SMM CPU Sync Protocol instance
+///
+SMM_CPU_SYNC_PROTOCOL mSmmCpuSync = {
+ SmmCpuSyncCheckApArrival,
+ SmmCpuSyncSetMode,
+ SmmCpuSyncGetMode
+};
+
+///
+/// SMM CPU Sync2 Protocol instance
+///
+SMM_CPU_SYNC2_PROTOCOL mSmmCpuSync2 = {
+ SmmCpuSync2CheckApArrival,
+ SmmCpuSync2SetMode,
+ SmmCpuSync2GetMode,
+ SmmCpuSync2CheckCpuState,
+ SmmCpuSync2ChangeSmmEnabling,
+ SmmCpuSync2SendSmi,
+ SmmCpuSync2SendSmiAllExcludingSelf
+};
+
+///
+/// SMM CPU Save State Protocol instance
+///
+EFI_SMM_CPU_SAVE_STATE_PROTOCOL mSmmCpuSaveState = {
+ (EFI_SMM_CPU_STATE **)mSmmCpuPrivateData.CpuSaveState
+};
+
+//
+// SMM stack information
+//
+UINTN mSmmStackArrayBase;
+UINTN mSmmStackArrayEnd;
+UINTN mSmmStackSize;
+
+//
+// Pointer to structure used during S3 Resume
+//
+SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;
+
+UINTN mMaxNumberOfCpus = 1;
+UINTN mNumberOfCpus = 1;
+
+//
+// SMM ready to lock flag
+//
+BOOLEAN mSmmReadyToLock = FALSE;
+
+///
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY
+///
+#define SMM_CPU_OFFSET(Field) OFFSET_OF (SOCKET_LGA_775_SMM_CPU_STATE, Field)
+
+///
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_REGISTER_RANGE
+///
+#define SMM_REGISTER_RANGE(Start, End) { Start, End, End - Start + 1 }
+
+///
+/// Table used by GetRegisterIndex() to convert an EFI_SMM_SAVE_STATE_REGISTER
+/// value to an index into a table of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY
+///
+CONST CPU_SMM_SAVE_STATE_REGISTER_RANGE mSmmCpuRegisterRanges[] = {
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_GDTBASE, EFI_SMM_SAVE_STATE_REGISTER_LDTINFO),
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_ES, EFI_SMM_SAVE_STATE_REGISTER_RIP),
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_RFLAGS, EFI_SMM_SAVE_STATE_REGISTER_CR4),
+ { (EFI_SMM_SAVE_STATE_REGISTER)0, (EFI_SMM_SAVE_STATE_REGISTER)0, 0 }
+};
+
+///
+/// Lookup table used to retrieve the widths and offsets associated with each
+/// supported EFI_SMM_SAVE_STATE_REGISTER value
+///
+CONST CPU_SMM_SAVE_STATE_LOOKUP_ENTRY mSmmCpuWidthOffset[] = {
+ {0, 0, 0, 0, 0, FALSE}, // Reserved
+
+ //
+ // Internally defined CPU Save State Registers. Not defined in PI SMM CPU Protocol.
+ //
+ {4, 4, SMM_CPU_OFFSET (x86.SMMRevId) , SMM_CPU_OFFSET (x64.SMMRevId) , 0 , FALSE}, // SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX = 1
+ {4, 4, SMM_CPU_OFFSET (x86.IOMisc) , SMM_CPU_OFFSET (x64.IOMisc) , 0 , FALSE}, // SMM_SAVE_STATE_REGISTER_IOMISC_INDEX = 2
+ {4, 8, SMM_CPU_OFFSET (x86.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) + 4, FALSE}, // SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX = 3
+ {4, 4, SMM_CPU_OFFSET (x64.SMBASE) , SMM_CPU_OFFSET (x64.SMBASE) , 0 , TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_SMBASE = 11
+
+ //
+ // CPU Save State registers defined in PI SMM CPU Protocol.
+ //
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.GdtBaseLoDword) , SMM_CPU_OFFSET (x64.GdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTBASE = 4
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.IdtBaseLoDword) , SMM_CPU_OFFSET (x64.IdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTBASE = 5
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.LdtBaseLoDword) , SMM_CPU_OFFSET (x64.LdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTBASE = 6
+ {0, 4, 0 , SMM_CPU_OFFSET (x64.GdtLimit) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTLIMIT = 7
+ {0, 4, 0 , SMM_CPU_OFFSET (x64.IdtLimit) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTLIMIT = 8
+ {0, 4, 0 , SMM_CPU_OFFSET (x64.LdtLimit) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTLIMIT = 9
+ {0, 4, 0 , SMM_CPU_OFFSET (x64.LdtInfo) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTINFO = 10
+
+ {4, 4, SMM_CPU_OFFSET (x86._ES) , SMM_CPU_OFFSET (x64._ES) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_ES = 20
+ {4, 4, SMM_CPU_OFFSET (x86._CS) , SMM_CPU_OFFSET (x64._CS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CS = 21
+ {4, 4, SMM_CPU_OFFSET (x86._SS) , SMM_CPU_OFFSET (x64._SS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_SS = 22
+ {4, 4, SMM_CPU_OFFSET (x86._DS) , SMM_CPU_OFFSET (x64._DS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DS = 23
+ {4, 4, SMM_CPU_OFFSET (x86._FS) , SMM_CPU_OFFSET (x64._FS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_FS = 24
+ {4, 4, SMM_CPU_OFFSET (x86._GS) , SMM_CPU_OFFSET (x64._GS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GS = 25
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._LDTR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTR_SEL = 26
+ {4, 4, SMM_CPU_OFFSET (x86._TR) , SMM_CPU_OFFSET (x64._TR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_TR_SEL = 27
+ {4, 8, SMM_CPU_OFFSET (x86._DR7) , SMM_CPU_OFFSET (x64._DR7) , SMM_CPU_OFFSET (x64._DR7) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR7 = 28
+ {4, 8, SMM_CPU_OFFSET (x86._DR6) , SMM_CPU_OFFSET (x64._DR6) , SMM_CPU_OFFSET (x64._DR6) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR6 = 29
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R8) , SMM_CPU_OFFSET (x64._R8) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R8 = 30
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R9) , SMM_CPU_OFFSET (x64._R9) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R9 = 31
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R10) , SMM_CPU_OFFSET (x64._R10) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R10 = 32
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R11) , SMM_CPU_OFFSET (x64._R11) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R11 = 33
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R12) , SMM_CPU_OFFSET (x64._R12) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R12 = 34
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R13) , SMM_CPU_OFFSET (x64._R13) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R13 = 35
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R14) , SMM_CPU_OFFSET (x64._R14) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R14 = 36
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R15) , SMM_CPU_OFFSET (x64._R15) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R15 = 37
+ {4, 8, SMM_CPU_OFFSET (x86._EAX) , SMM_CPU_OFFSET (x64._RAX) , SMM_CPU_OFFSET (x64._RAX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RAX = 38
+ {4, 8, SMM_CPU_OFFSET (x86._EBX) , SMM_CPU_OFFSET (x64._RBX) , SMM_CPU_OFFSET (x64._RBX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBX = 39
+ {4, 8, SMM_CPU_OFFSET (x86._ECX) , SMM_CPU_OFFSET (x64._RCX) , SMM_CPU_OFFSET (x64._RCX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RCX = 40
+ {4, 8, SMM_CPU_OFFSET (x86._EDX) , SMM_CPU_OFFSET (x64._RDX) , SMM_CPU_OFFSET (x64._RDX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDX = 41
+ {4, 8, SMM_CPU_OFFSET (x86._ESP) , SMM_CPU_OFFSET (x64._RSP) , SMM_CPU_OFFSET (x64._RSP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSP = 42
+ {4, 8, SMM_CPU_OFFSET (x86._EBP) , SMM_CPU_OFFSET (x64._RBP) , SMM_CPU_OFFSET (x64._RBP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBP = 43
+ {4, 8, SMM_CPU_OFFSET (x86._ESI) , SMM_CPU_OFFSET (x64._RSI) , SMM_CPU_OFFSET (x64._RSI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSI = 44
+ {4, 8, SMM_CPU_OFFSET (x86._EDI) , SMM_CPU_OFFSET (x64._RDI) , SMM_CPU_OFFSET (x64._RDI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDI = 45
+ {4, 8, SMM_CPU_OFFSET (x86._EIP) , SMM_CPU_OFFSET (x64._RIP) , SMM_CPU_OFFSET (x64._RIP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RIP = 46
+
+ {4, 8, SMM_CPU_OFFSET (x86._EFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RFLAGS = 51
+ {4, 8, SMM_CPU_OFFSET (x86._CR0) , SMM_CPU_OFFSET (x64._CR0) , SMM_CPU_OFFSET (x64._CR0) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR0 = 52
+ {4, 8, SMM_CPU_OFFSET (x86._CR3) , SMM_CPU_OFFSET (x64._CR3) , SMM_CPU_OFFSET (x64._CR3) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR3 = 53
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._CR4) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR4 = 54
+};
+
+///
+/// Lookup table for the IOMisc width information
+///
+CONST CPU_SMM_SAVE_STATE_IO_WIDTH mSmmCpuIoWidth[] = {
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 0
+ { 1, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // SMM_IO_LENGTH_BYTE = 1
+ { 2, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT16 }, // SMM_IO_LENGTH_WORD = 2
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 3
+ { 4, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT32 }, // SMM_IO_LENGTH_DWORD = 4
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 5
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 6
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 } // Undefined = 7
+};
+
+///
+/// Lookup table for the IOMisc type information
+///
+CONST EFI_SMM_SAVE_STATE_IO_TYPE mSmmCpuIoType[] = {
+ EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT, // SMM_IO_TYPE_OUT_DX = 0
+ EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, // SMM_IO_TYPE_IN_DX = 1
+ EFI_SMM_SAVE_STATE_IO_TYPE_STRING, // SMM_IO_TYPE_OUTS = 2
+ EFI_SMM_SAVE_STATE_IO_TYPE_STRING, // SMM_IO_TYPE_INS = 3
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 4
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 5
+ EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX, // SMM_IO_TYPE_REP_OUTS = 6
+ EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX, // SMM_IO_TYPE_REP_INS = 7
+ EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT, // SMM_IO_TYPE_OUT_IMMEDIATE = 8
+ EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, // SMM_IO_TYPE_IN_IMMEDIATE = 9
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 10
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 11
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 12
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 13
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 14
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0 // Undefined = 15
+};
+
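+///
+/// Illustrative decode (documentation only, not part of the original source):
+/// an IOMisc value whose Length field is SMM_IO_LENGTH_WORD (2) and whose Type
+/// field is SMM_IO_TYPE_IN_DX (1) selects mSmmCpuIoWidth[2], a 2-byte
+/// EFI_SMM_SAVE_STATE_IO_WIDTH_UINT16 access, and mSmmCpuIoType[1],
+/// EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, i.e. a 16-bit "IN AX, DX" instruction.
+/// Entries with width 0 or type 0 mark encodings that the save state cannot
+/// describe; SmmReadSaveState() returns EFI_NOT_FOUND for them.
+///
+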
+/**
+  Convert an EFI_SMM_SAVE_STATE_REGISTER value to an index into the mSmmCpuWidthOffset[] lookup table.
+
+  @param Register Specifies the CPU register to look up in the save state lookup table.
+
+  @retval 0  Register is not valid
+  @retval >0 Index into mSmmCpuWidthOffset[] associated with Register
+
+**/
+UINTN
+GetRegisterIndex (
+ IN EFI_SMM_SAVE_STATE_REGISTER Register
+ )
+{
+ UINTN Index;
+ UINTN Offset;
+
+ for (Index = 0, Offset = SMM_SAVE_STATE_REGISTER_MAX_INDEX; mSmmCpuRegisterRanges[Index].Length != 0; Index++) {
+ if (Register >= mSmmCpuRegisterRanges[Index].Start && Register <= mSmmCpuRegisterRanges[Index].End) {
+ return Register - mSmmCpuRegisterRanges[Index].Start + Offset;
+ }
+ Offset += mSmmCpuRegisterRanges[Index].Length;
+ }
+ return 0;
+}
+
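+//
+// Worked example (illustrative): index 0 is reserved as the "not found" return
+// value, and indexes 1..3 are reserved for the SMMRevId, IOMisc, and IOMemAddr
+// pseudo registers (see SMM_SAVE_STATE_REGISTER_*_INDEX in PiSmmCpuDxeSmm.h).
+// The zero-based k-th register of the first range in mSmmCpuRegisterRanges[]
+// therefore maps to index SMM_SAVE_STATE_REGISTER_MAX_INDEX + k, and each
+// subsequent range starts after the accumulated lengths of the ranges before it.
+//
+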
+/**
+ Read a CPU Save State register on the target processor.
+
+  This function abstracts whether the CPU Save State register resides in the IA32 CPU Save State Map,
+  the x64 CPU Save State Map, or a CPU Save State MSR.
+
+  This function supports reading a CPU Save State register in the SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.
+ @param[in] Width The number of bytes to read from the CPU save state.
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State.
+  @retval EFI_NOT_FOUND            The register is not defined for the Save State of the Processor.
+  @retval EFI_INVALID_PARAMETER    Width is larger than the size of the register in the current mode.
+ @retval EFI_UNSUPPORTED The register has no corresponding MSR.
+
+**/
+EFI_STATUS
+ReadSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN UINTN RegisterIndex,
+ IN UINTN Width,
+ OUT VOID *Buffer
+ )
+{
+ UINT32 SmmRevId;
+ SOCKET_LGA_775_SMM_CPU_STATE *CpuSaveState;
+
+ CpuSaveState = NULL;
+
+ {
+ CpuSaveState = gSmst->CpuSaveState[CpuIndex];
+ SmmRevId = CpuSaveState->x86.SMMRevId;
+ }
+
+ if (RegisterIndex == SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX) {
+ *(UINT32 *)Buffer = SmmRevId;
+ return EFI_SUCCESS;
+ }
+
+ if (SmmRevId < SOCKET_LGA_775_SMM_MIN_REV_ID_x64) {
+ //
+ // If 32-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Write return buffer
+ //
+ ASSERT (CpuSaveState != NULL);
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Width);
+ } else {
+ //
+ // If 64-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ {
+ //
+ // Write lower 32-bits of return buffer
+ //
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, MIN (4, Width));
+ if (Width >= 4) {
+ //
+ // Write upper 32-bits of return buffer
+ //
+ CopyMem((UINT8 *)Buffer + 4, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, Width - 4);
+ }
+ }
+ }
+ return EFI_SUCCESS;
+}
+
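+//
+// Note (illustrative): in the x64 path above, an 8-byte read of a register
+// copies 4 bytes from Offset64Lo and 4 bytes from Offset64Hi, while a read of
+// 4 bytes or fewer touches only Offset64Lo. Tracking the two 32-bit halves
+// separately lets one lookup table serve reads of any width from 1 to 8 bytes.
+//
+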
+/**
+ Write value to a CPU Save State register on the target processor.
+
+  This function abstracts whether the CPU Save State register resides in the IA32 CPU Save State Map,
+  the x64 CPU Save State Map, or a CPU Save State MSR.
+
+  This function supports writing a CPU Save State register in the SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+  @param[in] RegisterIndex  Index into mSmmCpuWidthOffset[] lookup table.
+  @param[in] Width          The number of bytes to write to the CPU save state.
+ @param[in] Buffer Upon entry, this holds the new CPU register value.
+
+ @retval EFI_SUCCESS The register was written to Save State.
+  @retval EFI_NOT_FOUND            The register is not defined for the Save State of the Processor.
+  @retval EFI_INVALID_PARAMETER    Width is larger than the size of the register in the current mode.
+  @retval EFI_UNSUPPORTED          The register is read-only or has no corresponding MSR.
+
+**/
+EFI_STATUS
+WriteSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN UINTN RegisterIndex,
+ IN UINTN Width,
+ IN CONST VOID *Buffer
+ )
+{
+ UINT32 SmmRevId;
+ SOCKET_LGA_775_SMM_CPU_STATE *CpuSaveState;
+
+ CpuSaveState = NULL;
+
+ //
+ // Do not write non-writeable SaveState.
+ //
+ if (!mSmmCpuWidthOffset[RegisterIndex].Writeable) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Get SMMRevId first.
+ //
+ {
+ CpuSaveState = gSmst->CpuSaveState[CpuIndex];
+ SmmRevId = CpuSaveState->x86.SMMRevId;
+ }
+
+ //
+ // Check CPU mode
+ //
+ if (SmmRevId < SOCKET_LGA_775_SMM_MIN_REV_ID_x64) {
+ //
+ // If 32-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {
+ return EFI_INVALID_PARAMETER;
+ }
+ //
+ // Write SMM State register
+ //
+ ASSERT (CpuSaveState != NULL);
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Buffer, Width);
+ } else {
+ //
+ // If 64-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ {
+ //
+ // Write lower 32-bits of SMM State register
+ //
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, Buffer, MIN (4, Width));
+ if (Width >= 4) {
+ //
+ // Write upper 32-bits of SMM State register
+ //
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, (UINT8 *)Buffer + 4, Width - 4);
+ }
+ }
+ }
+ return EFI_SUCCESS;
+}
+
+/**
+ Read information from the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+ @param Width The number of bytes to read from the CPU save state.
+  @param Register Specifies the CPU register to read from the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state.
+ @param Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State
+  @retval EFI_NOT_FOUND The register is not defined for the Save State of the Processor
+  @retval EFI_INVALID_PARAMETER Buffer is NULL, or CpuIndex or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmReadSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ OUT VOID *Buffer
+ )
+{
+ UINT32 SmmRevId;
+ SOCKET_LGA_775_SMM_CPU_STATE_IOMISC IoMisc;
+ EFI_SMM_SAVE_STATE_IO_INFO *IoInfo;
+ UINTN RegisterIndex;
+ VOID *IoMemAddr;
+
+ //
+ // Retrieve pointer to the specified CPU's SMM Save State buffer
+ //
+ if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
+ //
+ // The pseudo-register only supports the 64-bit size specified by Width.
+ //
+ if (Width != sizeof (UINT64)) {
+ return EFI_INVALID_PARAMETER;
+ }
+ //
+ // If the processor is in SMM at the time the SMI occurred,
+ // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
+ // Otherwise, EFI_NOT_FOUND is returned.
+ //
+ if (mSmmMpSyncData->CpuData[CpuIndex].Present) {
+ *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
+ return EFI_SUCCESS;
+ } else {
+ return EFI_NOT_FOUND;
+ }
+ }
+
+ if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_LMA
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {
+ //
+ // Only byte access is supported for this register
+ //
+ if (Width != 1) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ ReadSaveStateRegister (CpuIndex, SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX, sizeof (SmmRevId), &SmmRevId);
+
+    if (SmmRevId < SOCKET_LGA_775_SMM_MIN_REV_ID_x64) {
+ *(UINT8 *)Buffer = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
+ } else {
+ *(UINT8 *)Buffer = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
+ }
+
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_IO
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {
+ ReadSaveStateRegister (CpuIndex, SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX, sizeof (SmmRevId), &SmmRevId);
+
+ //
+ // See if the CPU supports the IOMisc register in the save state
+ //
+ if (SmmRevId < SOCKET_LGA_775_SMM_MIN_REV_ID_IOMISC) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Get the IOMisc register value
+ //
+ ReadSaveStateRegister (CpuIndex, SMM_SAVE_STATE_REGISTER_IOMISC_INDEX, sizeof (IoMisc.Uint32), &IoMisc.Uint32);
+
+ //
+ // Check for the SMI_FLAG in IOMisc
+ //
+ if (IoMisc.Bits.SmiFlag == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Compute index for the I/O Length and I/O Type lookup tables
+ //
+ if (mSmmCpuIoWidth[IoMisc.Bits.Length].Width == 0 || mSmmCpuIoType[IoMisc.Bits.Type] == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Zero the IoInfo structure that will be returned in Buffer
+ //
+ IoInfo = (EFI_SMM_SAVE_STATE_IO_INFO *)Buffer;
+ ZeroMem (IoInfo, sizeof (EFI_SMM_SAVE_STATE_IO_INFO));
+
+ //
+ // Use lookup tables to help fill in all the fields of the IoInfo structure
+ //
+ IoInfo->IoPort = (UINT16)IoMisc.Bits.Port;
+ IoInfo->IoWidth = mSmmCpuIoWidth[IoMisc.Bits.Length].IoWidth;
+ IoInfo->IoType = mSmmCpuIoType[IoMisc.Bits.Type];
+ if (IoInfo->IoType == EFI_SMM_SAVE_STATE_IO_TYPE_INPUT || IoInfo->IoType == EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT) {
+ ReadSaveStateRegister (CpuIndex, GetRegisterIndex (EFI_SMM_SAVE_STATE_REGISTER_RAX), mSmmCpuIoWidth[IoMisc.Bits.Length].Width, &IoInfo->IoData);
+ } else {
+ ReadSaveStateRegister (CpuIndex, SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX, sizeof (IoMemAddr), &IoMemAddr);
+ CopyMem (&IoInfo->IoData, IoMemAddr, mSmmCpuIoWidth[IoMisc.Bits.Length].Width);
+ }
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Convert Register to a register lookup table index
+ //
+ RegisterIndex = GetRegisterIndex (Register);
+ if (RegisterIndex == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ return ReadSaveStateRegister (CpuIndex, RegisterIndex, Width, Buffer);
+}
+
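+//
+// Usage sketch (hypothetical caller, not part of this driver): a software SMI
+// handler can read the saved RAX of the CPU that triggered the SMI through the
+// EFI_SMM_CPU_PROTOCOL produced by this module:
+//
+//   UINT64      Rax;
+//   EFI_STATUS  Status;
+//
+//   Status = SmmCpu->ReadSaveState (
+//                      SmmCpu,
+//                      sizeof (UINT64),
+//                      EFI_SMM_SAVE_STATE_REGISTER_RAX,
+//                      CpuIndex,
+//                      &Rax
+//                      );
+//
+// where SmmCpu is a located EFI_SMM_CPU_PROTOCOL instance and CpuIndex is the
+// index of the CPU whose save state is of interest.
+//
+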
+/**
+ Write data to the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+  @param Width The number of bytes to write to the CPU save state.
+ @param Register Specifies the CPU register to write to the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS The register was written to Save State
+  @retval EFI_NOT_FOUND The register is not defined for the Save State of the Processor
+  @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWriteSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ IN CONST VOID *Buffer
+ )
+{
+ UINTN RegisterIndex;
+
+ //
+ // Retrieve pointer to the specified CPU's SMM Save State buffer
+ //
+ if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
+ return EFI_SUCCESS;
+ }
+
+ if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_LMA are ignored
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_IO are not supported
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Convert Register to a register lookup table index
+ //
+ RegisterIndex = GetRegisterIndex (Register);
+ if (RegisterIndex == 0) {
+ //
+    // If Register is not valid, then return EFI_NOT_FOUND
+ //
+ return EFI_NOT_FOUND;
+ }
+
+ return WriteSaveStateRegister (CpuIndex, RegisterIndex, Width, Buffer);
+}
+
+/**
+  C function for the SMI handler used to change each processor's SMBASE register.
+
+**/
+VOID
+EFIAPI
+SmmInitHandler (
+ VOID
+ )
+{
+ UINT32 ApicId;
+ UINTN Index;
+ IA32_DESCRIPTOR X64Idtr;
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
+
+ //
+ // Set up X64 IDT table
+ //
+ ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * EXCEPTION_VECTOR_NUMBER);
+ X64Idtr.Base = (UINTN) IdtEntryTable;
+ X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * EXCEPTION_VECTOR_NUMBER - 1);
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);
+
+ ApicId = GetApicId ();
+
+ ASSERT (mNumberOfCpus <= FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber));
+
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
+ SmmInitiFeatures (Index, mCpuHotPlugData.SmrrBase, mCpuHotPlugData.SmrrSize, (UINT32)mCpuHotPlugData.SmBase[Index], mIsBsp);
+
+ if (!mIsBsp) {
+ SemaphoreHook (Index);
+ } else {
+ //
+ // BSP rebase is already done above.
+ // Initialize private data during S3 resume
+ //
+ InitializeMpSyncData ();
+ }
+ return;
+ }
+ }
+ ASSERT (FALSE);
+}
+
+/**
+ Relocate SmmBases for each processor.
+
+  Executed on first boot and all S3 resumes.
+
+**/
+VOID
+EFIAPI
+SmmRelocateBases (
+ VOID
+ )
+{
+ UINT8 BakBuf[BACK_BUF_SIZE];
+ SOCKET_LGA_775_SMM_CPU_STATE BakBuf2;
+ SOCKET_LGA_775_SMM_CPU_STATE *CpuStatePtr;
+ UINT8 *U8Ptr;
+ UINT32 ApicId;
+ UINTN Index;
+
+ //
+ // Make sure the reserved size is large enough for procedure SmmInitTemplate.
+ //
+ ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
+
+ //
+ // Patch ASM code template with current CR0, CR3, and CR4 values
+ //
+ gSmmCr0 = (UINT32)AsmReadCr0 ();
+ gSmmCr3 = (UINT32)AsmReadCr3 ();
+ gSmmCr4 = (UINT32)AsmReadCr4 ();
+
+ U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
+ CpuStatePtr = (SOCKET_LGA_775_SMM_CPU_STATE *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_CPU_STATE_OFFSET);
+
+ //
+ // Backup original contents @ 0x38000
+ //
+ CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
+ CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
+
+ //
+ // Load image for relocation
+ //
+ CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
+
+ //
+ // Retrieve the local APIC ID of current processor
+ //
+ ApicId = GetApicId ();
+
+ //
+ // Relocate SM bases for all APs
+  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
+ //
+ mIsBsp = FALSE;
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
+ mRebased[Index] = FALSE;
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
+
+ //
+ // Wait for this AP to finish its 1st SMI
+ //
+ while (!mRebased[Index]);
+ }
+ }
+
+ //
+ // Relocate BSP's SM base
+ //
+ mIsBsp = TRUE;
+ SendSmiIpi (ApicId);
+
+ //
+ // Restore contents @ 0x38000
+ //
+ CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
+ CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
+}
+
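+//
+// Note (illustrative): the relocation handshake above works as follows. Each
+// AP's first SMI lands on the default handler at 0x38000 (the copied
+// gcSmmInitTemplate), which transfers control to SmmInitHandler().
+// SmmInitHandler() programs the new SMBASE, and for APs SemaphoreHook()
+// arranges for mRebased[Index] to be set as the AP leaves SMM, releasing the
+// BSP's wait loop.
+//
+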
+/**
+ Perform SMM initialization for all processors in the S3 boot path.
+
+ For a native platform, MP initialization in the S3 boot path is also performed in this function.
+**/
+VOID
+SmmRestoreCpu (
+ VOID
+ )
+{
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;
+ IA32_DESCRIPTOR Ia32Idtr;
+ IA32_DESCRIPTOR X64Idtr;
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
+
+ DEBUG ((EFI_D_ERROR, "SmmRestoreCpu()\n"));
+
+ //
+ // See if there is enough context to resume PEI Phase
+ //
+ if (mSmmS3ResumeState == NULL) {
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
+ CpuDeadLoop ();
+ }
+
+ SmmS3ResumeState = mSmmS3ResumeState;
+ ASSERT (SmmS3ResumeState != NULL);
+
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
+ //
+ // Save the IA32 IDT Descriptor
+ //
+ AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
+
+ //
+ // Setup X64 IDT table
+ //
+ ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
+ X64Idtr.Base = (UINTN) IdtEntryTable;
+ X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);
+
+
+ //
+ // Initialize Debug Agent to support source level debug
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
+ }
+
+ //
+  // Perform the following CPU tasks for native platforms only
+ //
+ if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
+ //
+ // First time microcode load and restore MTRRs
+ //
+ // TODO EarlyInitializeCpu ();
+ }
+
+ //
+ // Restore SMBASE for BSP and all APs
+ //
+ SmmRelocateBases ();
+
+ //
+  // Perform the following CPU tasks for native platforms only
+ //
+ if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) {
+ //
+ // Restore MSRs for BSP and all APs
+ //
+ // TODO InitializeCpu ();
+ }
+
+ if (mSmmCodeAccessCheckEnable || FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ //
+ // Set a flag to restore SMM configuration in S3 path.
+ //
+ mRestoreSmmConfigurationInS3 = TRUE;
+ }
+
+ DEBUG (( EFI_D_ERROR, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
+ DEBUG (( EFI_D_ERROR, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
+ DEBUG (( EFI_D_ERROR, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
+ DEBUG (( EFI_D_ERROR, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
+ DEBUG (( EFI_D_ERROR, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));
+
+ //
+ // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
+ //
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
+ DEBUG ((EFI_D_ERROR, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
+
+ SwitchStack (
+ (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
+ );
+ }
+
+ //
+ // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
+ //
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
+ DEBUG ((EFI_D_ERROR, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
+ //
+ // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
+ //
+ SaveAndSetDebugTimerInterrupt (FALSE);
+ //
+ // Restore IA32 IDT table
+ //
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
+ AsmDisablePaging64 (
+ SmmS3ResumeState->ReturnCs,
+ (UINT32)SmmS3ResumeState->ReturnEntryPoint,
+ (UINT32)SmmS3ResumeState->ReturnContext1,
+ (UINT32)SmmS3ResumeState->ReturnContext2,
+ (UINT32)SmmS3ResumeState->ReturnStackPointer
+ );
+ }
+
+ //
+  // Cannot resume PEI Phase
+ //
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
+ CpuDeadLoop ();
+}
+
+/**
+ Copy register table from ACPI NVS memory into SMRAM.
+
+ @param[in] DestinationRegisterTableList Points to destination register table.
+ @param[in] SourceRegisterTableList Points to source register table.
+ @param[in] NumberOfCpus Number of CPUs.
+
+**/
+VOID
+CopyRegisterTable (
+ IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
+ IN CPU_REGISTER_TABLE *SourceRegisterTableList,
+ IN UINT32 NumberOfCpus
+ )
+{
+ UINTN Index;
+
+ CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
+ for (Index = 0; Index < NumberOfCpus; Index++) {
+ DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize);
+ ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL);
+ if (DestinationRegisterTableList[Index].RegisterTableEntry == NULL) {
+ return;
+ }
+ CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize);
+ }
+}
+
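+//
+// Note (illustrative): the initial CopyMem() duplicates only the table
+// headers; the per-CPU loop then deep-copies each RegisterTableEntry buffer so
+// that no pointer held in SMRAM refers back to ACPI NVS memory, which is
+// outside of SMRAM and untrusted at OS runtime.
+//
+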
+/**
+ Smm Ready To Lock event notification handler.
+
+ The CPU S3 data is copied to SMRAM for security. SMM Code Access Check feature is enabled if available.
+
+ @param[in] Protocol Points to the protocol's unique identifier.
+ @param[in] Interface Points to the interface instance.
+ @param[in] Handle The handle on which the interface was installed.
+
+ @retval EFI_SUCCESS Notification handler runs successfully.
+ **/
+EFI_STATUS
+EFIAPI
+SmmReadyToLockEventNotify (
+ IN CONST EFI_GUID *Protocol,
+ IN VOID *Interface,
+ IN EFI_HANDLE Handle
+ )
+{
+ ACPI_CPU_DATA *AcpiCpuData;
+ IA32_DESCRIPTOR *Gdtr;
+ IA32_DESCRIPTOR *Idtr;
+
+ //
+ // For a native platform, copy the CPU S3 data into SMRAM for security.
+ //
+ if (!FeaturePcdGet (PcdFrameworkCompatibilitySupport) && (PcdGet64 (PcdCpuS3DataAddress) != 0)) {
+ //
+ // Get CPU S3 data address
+ //
+ AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
+
+ //
+ // Copy CPU S3 data into SMRAM for use on CPU S3 Resume.
+ //
+ CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
+
+ mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
+ ASSERT (mAcpiCpuData.MtrrTable != 0);
+
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
+
+ mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
+ ASSERT (mAcpiCpuData.GdtrProfile != 0);
+
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+ mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
+ ASSERT (mAcpiCpuData.IdtrProfile != 0);
+
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+ mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
+ ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);
+
+ CopyRegisterTable (
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
+ mAcpiCpuData.NumberOfCpus
+ );
+
+ mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
+ ASSERT (mAcpiCpuData.RegisterTable != 0);
+
+ CopyRegisterTable (
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
+ mAcpiCpuData.NumberOfCpus
+ );
+
+ //
+ // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
+ //
+ Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
+ Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
+
+ mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
+ ASSERT (mGdtForAp != NULL);
+ mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1));
+ mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1));
+
+ CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
+ CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
+ CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
+ }
+
+ //
+ // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
+ //
+ mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
+
+ //
+ // Set SMM ready to lock flag
+ //
+ mSmmReadyToLock = TRUE;
+ return EFI_SUCCESS;
+}
+
+/**
+ The module Entry Point of the CPU SMM driver.
+
+ @param ImageHandle The firmware allocated handle for the EFI image.
+ @param SystemTable A pointer to the EFI System Table.
+
+ @retval EFI_SUCCESS The entry point is executed successfully.
+ @retval Other Some error occurs when executing this entry point.
+
+**/
+EFI_STATUS
+EFIAPI
+PiCpuSmmEntry (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ EFI_STATUS Status;
+ EFI_MP_SERVICES_PROTOCOL *MpServices;
+ UINTN NumberOfEnabledProcessors;
+ UINTN Index;
+ VOID *Buffer;
+ UINTN TileSize;
+ VOID *GuidHob;
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;
+ UINT8 *Stacks;
+ VOID *Registration;
+
+ //
+ // Initialize Debug Agent to support source level debug in SMM code
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
+
+ //
+  // Report the start of CPU SMM initialization.
+ //
+ REPORT_STATUS_CODE (
+ EFI_PROGRESS_CODE,
+ EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
+ );
+
+ //
+ // Fix segment address of the long-mode-switch jmp
+ //
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ gSmmJmpAddr.Segment = 8;
+ }
+
+ //
+ // Find out SMRR Base and Size and IED Base
+ //
+ FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
+
+ //
+ // Get MP Services Protocol
+ //
+ Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
+ //
+ Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
+ ASSERT_EFI_ERROR (Status);
+ ASSERT (mNumberOfCpus <= FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber));
+
+ //
+  // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE
+  // because a constant BSP index makes no sense when the BSP may be hot removed.
+ //
+ DEBUG_CODE (
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+
+ ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
+ }
+ );
+
+ //
+  // If CPU hot plug is supported, allocate resources for processors that may be hot added
+ //
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
+ } else {
+ mMaxNumberOfCpus = mNumberOfCpus;
+ }
+ gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
+
+ //
+ // The CPU save state and code for the SMI entry point are tiled within an SMRAM
+  // allocated buffer. The minimum size of this buffer for a uniprocessor system
+  // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state
+  // area is just below SMBASE + 64KB. If more than one CPU is present in the
+  // platform, then the SMI entry point and the CPU save state areas can be
+  // tiled to minimize the total amount of SMRAM required for all the CPUs. The
+  // tile size can be computed by adding the CPU save state size, any extra CPU
+  // specific context, and the size of code that must be placed at the SMI entry
+  // point to transfer control to a C function in the native SMM execution mode.
+  // This size is rounded up to the nearest power of 2 to give the tile size for
+  // each CPU. The total amount of memory required is the maximum number of CPUs
+  // that the platform supports times the tile size. The picture below shows the
+  // tiling, where m is the number of tiles that fit in 32KB.
+ //
+ // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
+ // | CPU m+1 Save State |
+ // +-----------------------------+
+ // | CPU m+1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 2m SMI Entry |
+ // +#############################+ <-- Base of allocated buffer + 64 KB
+ // | CPU m-1 Save State |
+ // +-----------------------------+
+ // | CPU m-1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 2m-1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | . . . . . . . . . . . . |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | CPU 2 Save State |
+ // +-----------------------------+
+ // | CPU 2 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m+1 SMI Entry |
+ // +=============================+ <-- Base of allocated buffer + 32 KB
+ // | CPU 1 Save State |
+ // +-----------------------------+
+ // | CPU 1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m SMI Entry |
+ // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
+ // | CPU 0 Save State |
+ // +-----------------------------+
+ // | CPU 0 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m-1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | . . . . . . . . . . . . |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 0 SMI Entry |
+ // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
+ //
+
+ //
+ // Compute tile size of buffer required to hold CPU save state, any extra CPU
+  // specific context, and the SMI entry point. This size is rounded up to the
+  // nearest power of 2.
+ //
+ TileSize = 2 * GetPowerOfTwo32 (sizeof (SOCKET_LGA_775_SMM_CPU_STATE) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + gcSmiHandlerSize - 1);
+
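+  //
+  // Note (illustrative): 2 * GetPowerOfTwo32 (n - 1) rounds n up to the
+  // nearest power of 2, because GetPowerOfTwo32() returns the value of the
+  // highest bit set. For example, if the save state, descriptor, and SMI
+  // handler code total 0x2600 bytes, the resulting tile size is 0x4000 bytes.
+  //
+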
+ //
+ // Allocate buffer for all of the tiles.
+ //
+ Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)));
+ ASSERT (Buffer != NULL);
+
+ //
+ // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
+ //
+ mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
+ ASSERT (mCpuHotPlugData.ApicId != NULL);
+ mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
+ ASSERT (mCpuHotPlugData.SmBase != NULL);
+ mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
+
+ //
+ // Retrieve APIC ID of each enabled processor from the MP Services protocol.
+ // Also compute the SMBASE address, CPU Save State address, and CPU Save state
+ // size for each CPU in the platform
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SIZE_32KB;
+ gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SOCKET_LGA_775_SMM_CPU_STATE);
+ gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SIZE_64KB - gSmmCpuPrivate->CpuSaveStateSize[Index]);
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+
+ if (Index < mNumberOfCpus) {
+ Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
+ ASSERT_EFI_ERROR (Status);
+ mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
+
+ DEBUG ((EFI_D_ERROR, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
+ Index,
+ (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
+ mCpuHotPlugData.SmBase[Index],
+ gSmmCpuPrivate->CpuSaveState[Index],
+ gSmmCpuPrivate->CpuSaveStateSize[Index]
+ ));
+ } else {
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
+ mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
+ }
+ }
+
+ //
+ // Allocate SMI stacks for all processors.
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+    // Two more pages are allocated for each processor:
+    // one is the guard page and the other is the known good stack.
+ //
+ // +-------------------------------------------+-----+-------------------------------------------+
+ // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
+ // +-------------------------------------------+-----+-------------------------------------------+
+ // | | | |
+ // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
+ //
+ mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
+ Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
+ ASSERT (Stacks != NULL);
+ mSmmStackArrayBase = (UINTN)Stacks;
+ mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
+ } else {
+ mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
+ Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
+ ASSERT (Stacks != NULL);
+ }
+
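+  //
+  // Sizing example (illustrative): with PcdCpuSmmStackSize = 0x2000 (2 pages)
+  // and the stack guard enabled, mSmmStackSize is 4 pages (0x4000) per
+  // processor: a known good stack page, a guard page, and the 2-page SMM stack.
+  //
+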
+ //
+ // Set SMI stack for SMM base relocation
+ //
+ gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));
+
+ //
+  // PcdGetXXX() functions can't be used in SmmRelocateBases(), which is called in both the normal and S3 boot paths.
+ // Related PCD values are retrieved into global variables.
+ //
+
+ mSmmUseDelayIndication = PcdGetBool (PcdCpuSmmUseDelayIndication);
+ mSmmUseBlockIndication = PcdGetBool (PcdCpuSmmUseBlockIndication);
+
+ //
+ // Relocate SMM Base addresses to the ones allocated from SMRAM
+ // BUGBUG: Work on later
+ //
+ SmmRelocateBases ();
+
+ //
+ // SMM Init
+ // BUGBUG: Work on later
+ //
+ InitializeSmmTimer ();
+
+ //
+ // Initialize IDT
+ // BUGBUG: Work on later
+ //
+ InitializeIDT ();
+
+ SetMem (mExternalVectorTable, sizeof(mExternalVectorTable), 0);
+ //
+ // Set the pointer to the array of C based exception handling routines.
+ //
+ InitializeSmmExternalVectorTablePtr (mExternalVectorTable);
+
+ //
+ // Initialize MP globals
+ //
+ InitializeMpServiceData (ImageHandle, Stacks, mSmmStackSize);
+
+ //
+ // Fill in SMM Reserved Regions
+ //
+ gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
+ gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
+
+ //
+ // Install the SMM Configuration Protocol onto a new handle on the handle database.
+ // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
+ // to an SMRAM address will be present in the handle database
+ //
+ Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
+ &gSmmCpuPrivate->SmmCpuHandle,
+ &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
+ NULL
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Install the SMM CPU Protocol into SMM protocol database
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gEfiSmmCpuProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpu
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Install Smm CPU Sync protocol
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gSmmCpuSyncProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpuSync
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Install Smm CPU Sync2 protocol
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gSmmCpuSync2ProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpuSync2
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
+ //
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ PcdSet64 (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
+ }
+
+ //
+ // Initialize SMM CPU Services Support
+ //
+ Status = InitializeSmmCpuServices (mSmmCpuHandle);
+ ASSERT_EFI_ERROR (Status);
+
+ if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) {
+ //
+ // Install Framework SMM Save State Protocol into UEFI protocol database for backward compatibility
+ //
+ Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
+ &gSmmCpuPrivate->SmmCpuHandle,
+ &gEfiSmmCpuSaveStateProtocolGuid,
+ &mSmmCpuSaveState,
+ NULL
+ );
+ ASSERT_EFI_ERROR (Status);
+ //
+ // The SmmStartupThisAp service in Framework SMST should always be non-null.
+ // Update SmmStartupThisAp pointer in PI SMST here so that PI/Framework SMM thunk
+ // can have it ready when constructing Framework SMST.
+ //
+ gSmst->SmmStartupThisAp = SmmStartupThisAp;
+ }
+
+ //
+  // Register SMM Ready To Lock Protocol notification
+ //
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmReadyToLockProtocolGuid,
+ SmmReadyToLockEventNotify,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
+ if (GuidHob != NULL) {
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);
+
+ DEBUG ((EFI_D_ERROR, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
+ DEBUG ((EFI_D_ERROR, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));
+
+ SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
+ ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));
+
+ mSmmS3ResumeState = SmmS3ResumeState;
+ SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;
+
+ SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;
+
+ SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
+ SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
+ if (SmmS3ResumeState->SmmS3StackBase == 0) {
+ SmmS3ResumeState->SmmS3StackSize = 0;
+ }
+
+ SmmS3ResumeState->SmmS3Cr0 = gSmmCr0;
+ SmmS3ResumeState->SmmS3Cr3 = gSmiCr3;
+ SmmS3ResumeState->SmmS3Cr4 = gSmmCr4;
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
+ }
+ if (sizeof (UINTN) == sizeof (UINT32)) {
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
+ }
+ }
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ InitSmmProfile ();
+ }
+ DEBUG ((EFI_D_ERROR, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Find out SMRAM information including SMRR base, SMRR size.
+
+ @param SmrrBase SMRR base
+ @param SmrrSize SMRR size
+
+**/
+VOID
+FindSmramInfo (
+ OUT UINT32 *SmrrBase,
+ OUT UINT32 *SmrrSize
+ )
+{
+ EFI_STATUS Status;
+ UINTN Size;
+ EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
+ EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
+ EFI_SMRAM_DESCRIPTOR *SmramRanges;
+ UINTN SmramRangeCount;
+ UINTN Index;
+ UINT64 MaxSize;
+ BOOLEAN Found;
+
+ //
+ // Get SMM Access Protocol
+ //
+ Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Get SMRAM information
+ //
+ Size = 0;
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
+ ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+ SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
+ ASSERT (SmramRanges != NULL);
+
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges);
+ ASSERT_EFI_ERROR (Status);
+
+ SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
+
+ //
+ // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
+ //
+ CurrentSmramRange = NULL;
+ for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) {
+ //
+ // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
+ //
+ if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
+ continue;
+ }
+
+ if (SmramRanges[Index].CpuStart >= BASE_1MB) {
+ if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) {
+ if (SmramRanges[Index].PhysicalSize >= MaxSize) {
+ MaxSize = SmramRanges[Index].PhysicalSize;
+ CurrentSmramRange = &SmramRanges[Index];
+ }
+ }
+ }
+ }
+
+ ASSERT (CurrentSmramRange != NULL);
+
+ *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
+ *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
+
+ do {
+ Found = FALSE;
+ for (Index = 0; Index < SmramRangeCount; Index++) {
+ if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) {
+ *SmrrBase = (UINT32)SmramRanges[Index].CpuStart;
+ *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
+ Found = TRUE;
+ } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) {
+ *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize);
+ Found = TRUE;
+ }
+ }
+ } while (Found);
+
+ DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
+}
+
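+//
+// Merge example (illustrative): given SMRAM descriptors covering
+// [0x7F800000, 0x7FF00000) and [0x7FF00000, 0x80000000), the scan above picks
+// the larger range first, and the do/while loop then absorbs the adjacent one,
+// yielding SmrrBase = 0x7F800000 and SmrrSize = 0x800000, so the SMRR covers
+// all contiguous SMRAM.
+//
+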
+/**
+ Perform the remaining tasks.
+
+**/
+VOID
+PerformRemainingTasks (
+ VOID
+ )
+{
+ if (mSmmReadyToLock) {
+ //
+ // Start SMM Profile feature
+ //
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfileStart ();
+ }
+ //
+ // Configure SMM Code Access Check feature if available.
+ //
+ if (mSmmCodeAccessCheckEnable || FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ ConfigSmmCodeAccessCheck ();
+ }
+
+ //
+ // Clean SMM ready to lock flag
+ //
+ mSmmReadyToLock = FALSE;
+ }
+}
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
new file mode 100644
index 0000000000..39c7ba5049
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -0,0 +1,780 @@
+/** @file
+ Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _CPU_PISMMCPUDXESMM_H_
+#define _CPU_PISMMCPUDXESMM_H_
+#include <PiSmm.h>
+#include <AcpiCpuData.h>
+
+#include <Protocol/MpService.h>
+#include <Protocol/SmmConfiguration.h>
+#include <Protocol/SmmCpu.h>
+#include <Protocol/SmmAccess2.h>
+#include <Protocol/SmmCpuSaveState.h>
+#include <Protocol/SmmReadyToLock.h>
+#include <Protocol/SmmCpuSync.h>
+#include <Protocol/SmmCpuSync2.h>
+#include <Protocol/SmmCpuService.h>
+
+#include <Guid/AcpiS3Context.h>
+
+#include <Library/BaseLib.h>
+#include <Library/IoLib.h>
+#include <Library/TimerLib.h>
+#include <Library/SmmLib.h>
+#include <Library/SynchronizationLib.h>
+#include <Library/DebugLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/PcdLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/MtrrLib.h>
+#include <Library/SocketLga775Lib.h>
+#include <Library/SmmCpuPlatformHookLib.h>
+#include <Library/SmmServicesTableLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/UefiRuntimeServicesTableLib.h>
+#include <Library/DebugAgentLib.h>
+#include <Library/HobLib.h>
+#include <Library/CpuConfigLib.h>
+#include <Library/LocalApicLib.h>
+#include <Library/UefiCpuLib.h>
+#include <Library/ReportStatusCodeLib.h>
+
+#include <CpuHotPlugData.h>
+
+#include "SmmFeatures.h"
+#include "CpuService.h"
+#include "SmmProfile.h"
+
+//
+// Size of Task-State Segment defined in IA32 Manual
+//
+#define TSS_SIZE 104
+#define TSS_X64_IST1_OFFSET 36
+#define TSS_IA32_CR3_OFFSET 28
+#define TSS_IA32_ESP_OFFSET 56
+
+//
+// The size 0x20 must be larger than the size of the
+// SmmInit template code. Currently, SmmInit requires
+// a buffer of at least 0x16 bytes.
+//
+#define BACK_BUF_SIZE 0x20
+
+#define EXCEPTION_VECTOR_NUMBER 0x20
+
+#define INVALID_APIC_ID 0xFFFFFFFFFFFFFFFFULL
+
+//
+// SMM CPU synchronization features available on a processor
+//
+typedef struct {
+ BOOLEAN DelayIndicationSupported;
+ BOOLEAN BlockIndicationSupported;
+ //
+  // This processor's LOG_PROC_EN bit used in the SMM_DELAYED and SMM_BLOCKED MSRs
+  // (introduced in the Haswell processor).
+ // Value of "-1" indicates this field is invalid (i.e. LOG_PROC_EN bit is not
+ // supported)
+ //
+ UINT64 HaswellLogProcEnBit;
+} SMM_CPU_SYNC_FEATURE;
+
+typedef UINT32 SMM_CPU_ARRIVAL_EXCEPTIONS;
+#define ARRIVAL_EXCEPTION_BLOCKED 0x1
+#define ARRIVAL_EXCEPTION_DELAYED 0x2
+#define ARRIVAL_EXCEPTION_SMI_DISABLED 0x4
+
+//
+// Private structure for the SMM CPU module that is stored in DXE Runtime memory
+// Contains the SMM Configuration Protocol that is produced.
+// Contains a mix of DXE and SMM contents, so each field must be used from the proper context.
+//
+#define SMM_CPU_PRIVATE_DATA_SIGNATURE SIGNATURE_32 ('s', 'c', 'p', 'u')
+
+typedef struct {
+ UINTN Signature;
+
+ EFI_HANDLE SmmCpuHandle;
+
+ EFI_PROCESSOR_INFORMATION ProcessorInfo [FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+ SMM_CPU_OPERATION Operation [FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+ UINTN CpuSaveStateSize [FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+ VOID *CpuSaveState [FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+ UINT64 TstateMsr [FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+ SMM_CPU_SYNC_FEATURE SmmSyncFeature [FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+
+ EFI_SMM_RESERVED_SMRAM_REGION SmmReservedSmramRegion[1];
+ EFI_SMM_ENTRY_CONTEXT SmmCoreEntryContext;
+ EFI_SMM_ENTRY_POINT SmmCoreEntry;
+
+ EFI_SMM_CONFIGURATION_PROTOCOL SmmConfiguration;
+} SMM_CPU_PRIVATE_DATA;
+
+extern SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate;
+extern CPU_HOT_PLUG_DATA mCpuHotPlugData;
+extern UINTN mMaxNumberOfCpus;
+extern UINTN mNumberOfCpus;
+extern volatile BOOLEAN mRebased[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+extern UINT32 gSmbase;
+extern BOOLEAN mRestoreSmmConfigurationInS3;
+extern EFI_SMM_CPU_PROTOCOL mSmmCpu;
+
+//
+// SMM CPU Protocol function prototypes.
+//
+
+/**
+ Read information from the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+  @param Width The number of bytes to read from the CPU save state.
+  @param Register Specifies the CPU register to read from the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State
+  @retval EFI_NOT_FOUND The register is not defined for the Save State of the Processor
+  @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmReadSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ OUT VOID *Buffer
+ );
+
+/**
+ Write data to the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+  @param Width The number of bytes to write to the CPU save state.
+ @param Register Specifies the CPU register to write to the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS The register was written to Save State
+  @retval EFI_NOT_FOUND The register is not defined for the Save State of the Processor
+  @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWriteSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ IN CONST VOID *Buffer
+ );
+
+//
+// SMM CPU Sync Protocol and SMM CPU Sync2 Protocol function prototypes.
+//
+
+/**
+  Given the timeout constraint, wait for all APs to arrive. If this function returns EFI_SUCCESS, then
+ no AP will execute normal mode code before entering SMM, so it is safe to access shared hardware resource
+ between SMM and normal mode. Note if there are SMI disabled APs, this function will return EFI_NOT_READY.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+
+ @retval EFI_SUCCESS No AP will execute normal mode code before entering SMM, so it is safe to access
+ shared hardware resource between SMM and normal mode
+ @retval EFI_NOT_READY One or more CPUs may still execute normal mode code
+  @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSyncCheckApArrival (
+ IN SMM_CPU_SYNC_PROTOCOL *This
+ );
+
+/**
+  Given the timeout constraint, wait for all APs to arrive. If this function returns EFI_SUCCESS, then
+ no AP will execute normal mode code before entering SMM, so it is safe to access shared hardware resource
+ between SMM and normal mode. Note if there are SMI disabled APs, this function will return EFI_NOT_READY.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+
+ @retval EFI_SUCCESS No AP will execute normal mode code before entering SMM, so it is safe to access
+ shared hardware resource between SMM and normal mode
+ @retval EFI_NOT_READY One or more CPUs may still execute normal mode code
+  @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2CheckApArrival (
+ IN SMM_CPU_SYNC2_PROTOCOL *This
+ );
+
+/**
+ Set SMM synchronization mode starting from the next SMI run.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param SyncMode The SMM synchronization mode to be set and effective from the next SMI run.
+
+ @retval EFI_SUCCESS The function completes successfully.
+  @retval EFI_INVALID_PARAMETER SyncMode is not valid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSyncSetMode (
+ IN SMM_CPU_SYNC_PROTOCOL *This,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ );
+
+/**
+ Set SMM synchronization mode starting from the next SMI run.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param SyncMode The SMM synchronization mode to be set and effective from the next SMI run.
+
+ @retval EFI_SUCCESS The function completes successfully.
+  @retval EFI_INVALID_PARAMETER SyncMode is not valid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2SetMode (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ );
+
+/**
+ Get current effective SMM synchronization mode.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param SyncMode Output parameter. The current effective SMM synchronization mode.
+
+ @retval EFI_SUCCESS The SMM synchronization mode has been retrieved successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSyncGetMode (
+ IN SMM_CPU_SYNC_PROTOCOL *This,
+ OUT SMM_CPU_SYNC_MODE *SyncMode
+ );
+
+/**
+ Get current effective SMM synchronization mode.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param SyncMode Output parameter. The current effective SMM synchronization mode.
+
+ @retval EFI_SUCCESS The SMM synchronization mode has been retrieved successfully.
+ @retval EFI_INVALID_PARAMETER SyncMode is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2GetMode (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ OUT SMM_CPU_SYNC_MODE *SyncMode
+ );
+
+/**
+ Checks CPU SMM synchronization state
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param CpuIndex Index of the CPU for which the state is to be retrieved.
+ @param CpuState The state of the CPU
+
+ @retval EFI_SUCCESS Returns EFI_SUCCESS if the CPU's state is retrieved.
+ @retval EFI_INVALID_PARAMETER Returns EFI_INVALID_PARAMETER if CpuIndex is invalid or CpuState is NULL
+  @retval EFI_DEVICE_ERROR Error occurred in obtaining CPU status.
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2CheckCpuState (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN UINTN CpuIndex,
+ OUT SMM_CPU_SYNC2_CPU_STATE *CpuState
+ );
+
+/**
+ Change CPU's SMM enabling state.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param CpuIndex Index of the CPU to enable / disable SMM
+ @param Enable If TRUE, enable SMM; if FALSE disable SMM
+
+ @retval EFI_SUCCESS The CPU's SMM enabling state is changed.
+ @retval EFI_INVALID_PARAMETER Returns EFI_INVALID_PARAMETER if CpuIndex is invalid
+  @retval EFI_UNSUPPORTED Returns EFI_UNSUPPORTED if the CPU does not support dynamically enabling / disabling SMI
+  @retval EFI_DEVICE_ERROR Error occurred in changing SMM enabling state.
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2ChangeSmmEnabling (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN UINTN CpuIndex,
+ IN BOOLEAN Enable
+ );
+
+/**
+ Send SMI IPI to a specific CPU.
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+ @param CpuIndex Index of the CPU to send SMI to.
+
+ @retval EFI_SUCCESS The SMI IPI is sent successfully.
+ @retval EFI_INVALID_PARAMETER Returns EFI_INVALID_PARAMETER if CpuIndex is invalid
+  @retval EFI_DEVICE_ERROR Error occurred in sending SMI IPI.
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2SendSmi (
+ IN SMM_CPU_SYNC2_PROTOCOL *This,
+ IN UINTN CpuIndex
+ );
+
+/**
+ Send SMI IPI to all CPUs excluding self
+
+ @param This A pointer to the SMM_CPU_SYNC_PROTOCOL instance.
+
+  @retval EFI_SUCCESS The SMI IPIs are sent successfully.
+  @retval EFI_DEVICE_ERROR Error occurred in sending the SMI IPIs.
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuSync2SendSmiAllExcludingSelf (
+ IN SMM_CPU_SYNC2_PROTOCOL *This
+ );
+
+///
+/// Structure used to describe a range of registers
+///
+typedef struct {
+ EFI_SMM_SAVE_STATE_REGISTER Start;
+ EFI_SMM_SAVE_STATE_REGISTER End;
+ UINTN Length;
+} CPU_SMM_SAVE_STATE_REGISTER_RANGE;
+
+///
+/// Structure used to build a lookup table to retrieve the widths and offsets
+/// associated with each supported EFI_SMM_SAVE_STATE_REGISTER value
+///
+
+#define SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX 1
+#define SMM_SAVE_STATE_REGISTER_IOMISC_INDEX 2
+#define SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX 3
+#define SMM_SAVE_STATE_REGISTER_MAX_INDEX 4
+
+typedef struct {
+ UINT8 Width32;
+ UINT8 Width64;
+ UINT16 Offset32;
+ UINT16 Offset64Lo;
+ UINT16 Offset64Hi;
+ BOOLEAN Writeable;
+} CPU_SMM_SAVE_STATE_LOOKUP_ENTRY;
+
+///
+/// Structure used to build a lookup table for the IOMisc width information
+///
+typedef struct {
+ UINT8 Width;
+ EFI_SMM_SAVE_STATE_IO_WIDTH IoWidth;
+} CPU_SMM_SAVE_STATE_IO_WIDTH;
+
+typedef struct {
+ UINT32 MsrIndex;
+ UINT64 MsrValue;
+} MSR_INFO;
+
+//
+// Far address of the long-mode-switch jump patched in PiCpuSmmEntry()
+//
+typedef struct {
+ UINT32 Offset;
+ UINT16 Segment;
+ UINT16 Reserved;
+} IA32_FAR_ADDRESS;
+
+extern IA32_FAR_ADDRESS gSmmJmpAddr;
+
+extern CONST UINT8 gcSmmInitTemplate[];
+extern CONST UINT16 gcSmmInitSize;
+extern UINT32 gSmmCr0;
+extern UINT32 gSmmCr3;
+extern UINT32 gSmmCr4;
+extern UINTN gSmmInitStack;
+
+/**
+  Semaphore operation for all processors to relocate SMMBASE.
+**/
+VOID
+EFIAPI
+SmmRelocationSemaphoreComplete (
+ VOID
+ );
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param CpuIndex The processor index.
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex
+ );
+
+///
+/// The type of SMM CPU Information
+///
+typedef struct {
+ SPIN_LOCK Busy;
+ volatile EFI_AP_PROCEDURE Procedure;
+ volatile VOID *Parameter;
+ volatile UINT32 Run;
+ volatile BOOLEAN Present;
+} SMM_CPU_DATA_BLOCK;
+
+typedef struct {
+ SMM_CPU_DATA_BLOCK CpuData[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+ volatile UINT32 Counter;
+ volatile UINT32 BspIndex;
+ volatile BOOLEAN InsideSmm;
+ volatile BOOLEAN AllCpusInSync;
+ SMM_CPU_SYNC_MODE SyncModeToBeSet;
+ volatile SMM_CPU_SYNC_MODE EffectiveSyncMode;
+ volatile BOOLEAN SwitchBsp;
+ volatile BOOLEAN CandidateBsp[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+} SMM_DISPATCHER_MP_SYNC_DATA;
+
+extern IA32_DESCRIPTOR gcSmiGdtr;
+extern CONST IA32_DESCRIPTOR gcSmiIdtr;
+extern UINT32 gSmiCr3;
+extern volatile UINTN gSmiStack;
+extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd;
+extern volatile UINT8 gcSmiHandlerTemplate[];
+extern CONST UINT16 gcSmiHandlerSize;
+extern CONST UINT16 gcSmiHandlerOffset;
+extern UINT64 gPhyMask;
+extern ACPI_CPU_DATA mAcpiCpuData;
+extern SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData;
+extern VOID *mGdtForAp;
+extern VOID *mIdtForAp;
+extern VOID *mMachineCheckHandlerForAp;
+extern UINTN mSmmStackArrayBase;
+extern UINTN mSmmStackArrayEnd;
+extern UINTN mSmmStackSize;
+extern IA32_IDT_GATE_DESCRIPTOR gSavedPageFaultIdtEntry;
+extern IA32_IDT_GATE_DESCRIPTOR gSavedDebugExceptionIdtEntry;
+extern EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService;
+
+/**
+ Create 4G PageTable in SMRAM.
+
+ @param ExtraPages The number of additional pages to allocate beyond those needed to map 4G memory
+ @return PageTable Address
+
+**/
+UINT32
+Gen4GPageTable (
+ IN UINTN ExtraPages
+ );
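+
+//
+// Usage sketch (illustrative, not an actual call site in this module):
+// the returned value is a page table base suitable for loading into CR3.
+//
+//   UINT32  PageTable;
+//   PageTable = Gen4GPageTable (0);  // identity-map 0..4GB, no extra pages
+//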
+
+/**
+ Initialize global data for MP synchronization.
+
+ @param ImageHandle File Image Handle.
+ @param Stacks Base address of SMI stack buffer for all processors.
+ @param StackSize Stack size for each processor in SMM.
+
+**/
+VOID
+InitializeMpServiceData (
+ IN EFI_HANDLE ImageHandle,
+ IN VOID *Stacks,
+ IN UINTN StackSize
+ );
+
+/**
+ Initialize Timer for Smm AP Sync.
+
+**/
+VOID
+InitializeSmmTimer (
+ VOID
+ );
+
+/**
+ Start Timer for Smm AP Sync.
+
+**/
+UINT64
+EFIAPI
+StartSyncTimer (
+ VOID
+ );
+
+/**
+ Check if the Smm AP Sync timer is timeout.
+
+ @param Timer The start timer value returned by StartSyncTimer().
+
+**/
+BOOLEAN
+EFIAPI
+IsSyncTimerTimeout (
+ IN UINT64 Timer
+ );
+
+/**
+ Initialize IDT for SM mode.
+
+**/
+VOID
+EFIAPI
+InitializeIDT (
+ VOID
+ );
+
+/**
+ Register the SMM Foundation entry point.
+
+ @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
+ @param SmmEntryPoint SMM Foundation EntryPoint
+
+ @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
+
+**/
+EFI_STATUS
+EFIAPI
+RegisterSmmEntry (
+ IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
+ IN EFI_SMM_ENTRY_POINT SmmEntryPoint
+ );
+
+/**
+ Create PageTable for SMM use.
+
+ @return PageTable Address
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ );
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param Procedure The address of the procedure to run
+ @param CpuIndex Target CPU number
+ @param ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuIndex is not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ );
+
+/**
+ Schedule a procedure to run on the specified CPU in a blocking fashion.
+
+ @param Procedure The address of the procedure to run
+ @param CpuIndex Target CPU Index
+ @param ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuIndex is not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ );
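+
+//
+// Usage sketch: mirrors the call pattern used by ConfigSmmCodeAccessCheck()
+// in SmmFeatures.c; MyApProcedure and MyContext are hypothetical names.
+//
+//   Status = SmmBlockingStartupThisAp (MyApProcedure, CpuIndex, &MyContext);
+//   ASSERT_EFI_ERROR (Status);
+//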
+
+/**
+ Initialize MP synchronization data.
+
+**/
+VOID
+EFIAPI
+InitializeMpSyncData (
+ VOID
+ );
+
+/**
+ Find out SMRAM information including SMRR base, SMRR size.
+
+ @param SmrrBase SMRR base
+ @param SmrrSize SMRR size
+
+**/
+VOID
+FindSmramInfo (
+ OUT UINT32 *SmrrBase,
+ OUT UINT32 *SmrrSize
+ );
+
+/**
+ The function is invoked before SMBASE relocation in the S3 path to restore CPU status.
+
+ The function is invoked before SMBASE relocation in the S3 path. It performs the first-time
+ microcode load and restores MTRRs for both the BSP and APs.
+
+**/
+VOID
+EarlyInitializeCpu (
+ VOID
+ );
+
+/**
+ The function is invoked after SMBASE relocation in the S3 path to restore CPU status.
+
+ The function is invoked after SMBASE relocation in the S3 path. It restores the configuration
+ according to data saved by the normal boot path, for both the BSP and APs.
+
+**/
+VOID
+InitializeCpu (
+ VOID
+ );
+
+/**
+ Page Fault handler for SMM use.
+
+ @param InterruptType Defines the type of interrupt or exception that
+ occurred on the processor. This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ );
+
+/**
+ Restore the SMM configuration for Haswell enhanced SMM features in the S3 boot path.
+**/
+VOID
+RestoreSmmConfigurationInS3 (
+ VOID
+ );
+
+/**
+ Read a CPU Save State register on the target processor.
+
+ This function abstracts whether the CPU Save State register resides in the IA32 CPU Save State
+ Map, the x64 CPU Save State Map, or a CPU Save State MSR.
+
+ This function supports reading a CPU Save State register in SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.
+ @param[in] Width The number of bytes to read from the CPU save state.
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State.
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+ @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+ @retval EFI_UNSUPPORTED The register has no corresponding MSR.
+
+**/
+EFI_STATUS
+ReadSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN UINTN RegisterIndex,
+ IN UINTN Width,
+ OUT VOID *Buffer
+ );
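+
+//
+// Usage sketch (illustrative): read the low 4 bytes of RAX from the save
+// state of CPU 0, translating the PI register ID into a lookup-table index
+// with GetRegisterIndex() declared below.
+//
+//   UINT32  Value;
+//   Status = ReadSaveStateRegister (0, GetRegisterIndex (EFI_SMM_SAVE_STATE_REGISTER_RAX),
+//                                   sizeof (Value), &Value);
+//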
+
+/**
+ Write value to a CPU Save State register on the target processor.
+
+ This function abstracts whether the CPU Save State register resides in the IA32 CPU Save State
+ Map, the x64 CPU Save State Map, or a CPU Save State MSR.
+
+ This function supports writing a CPU Save State register in SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.
+ @param[in] Width The number of bytes to write to the CPU save state.
+ @param[in] Buffer Upon entry, this holds the new CPU register value.
+
+ @retval EFI_SUCCESS The register was written to Save State.
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+ @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct.
+ @retval EFI_UNSUPPORTED The register is read-only, cannot be written, or has no corresponding MSR.
+
+**/
+EFI_STATUS
+WriteSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN UINTN RegisterIndex,
+ IN UINTN Width,
+ IN CONST VOID *Buffer
+ );
+
+/**
+ Read information from the CPU save state.
+
+ @param Register Specifies the CPU register to read from the save state.
+
+ @retval 0 Register is not valid
+ @retval >0 Index into mSmmCpuWidthOffset[] associated with Register
+
+**/
+UINTN
+GetRegisterIndex (
+ IN EFI_SMM_SAVE_STATE_REGISTER Register
+ );
+
+/**
+ Perform the remaining tasks.
+
+**/
+VOID
+PerformRemainingTasks (
+ VOID
+ );
+
+#endif
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
new file mode 100644
index 0000000000..fd497ac2b8
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -0,0 +1,195 @@
+## @file
+# Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = PiSmmCpuDxeSmm
+ FILE_GUID = E730A001-4DE6-4901-BBE4-7265BE0FCD8D
+ MODULE_TYPE = DXE_SMM_DRIVER
+ VERSION_STRING = 1.0
+ PI_SPECIFICATION_VERSION = 0x0001000A
+ ENTRY_POINT = PiCpuSmmEntry
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = IA32 X64
+#
+
+[Sources]
+ PiSmmCpuDxeSmm.c
+ PiSmmCpuDxeSmm.h
+ SmmFeatures.c
+ SmmFeatures.h
+ MpService.c
+ SyncTimer.c
+ CpuS3.c
+ CpuService.c
+ CpuService.h
+ SmmProfile.c
+
+[Sources.Ia32]
+ Ia32/Semaphore.c
+ Ia32/PageTbl.c
+ Ia32/SmmProfileArch.c
+ Ia32/SmmInit.asm | MSFT
+ Ia32/SmiException.asm | MSFT
+ Ia32/SmiEntry.asm | MSFT
+ Ia32/MpFuncs.asm | MSFT
+
+ Ia32/SmmInit.asm | INTEL
+ Ia32/SmiException.asm | INTEL
+ Ia32/SmiEntry.asm | INTEL
+ Ia32/MpFuncs.asm | INTEL
+
+ Ia32/SmmInit.S | GCC
+ Ia32/SmiException.S | GCC
+ Ia32/SmiEntry.S | GCC
+ Ia32/MpFuncs.S | GCC
+
+[Sources.X64]
+ X64/Semaphore.c
+ X64/PageTbl.c
+ X64/SmmProfileArch.c
+ X64/SmmInit.asm | MSFT
+ X64/SmiException.asm | MSFT
+ X64/SmiEntry.asm | MSFT
+ X64/MpFuncs.asm | MSFT
+
+ X64/SmmInit.asm | INTEL
+ X64/SmiException.asm | INTEL
+ X64/SmiEntry.asm | INTEL
+ X64/MpFuncs.asm | INTEL
+
+ X64/SmmInit.S | GCC
+ X64/SmiException.S | GCC
+ X64/SmiEntry.S | GCC
+ X64/MpFuncs.S | GCC
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+ IntelFrameworkPkg/IntelFrameworkPkg.dec
+ BraswellPlatformPkg/BraswellPlatformPkg.dec
+
+[LibraryClasses]
+ UefiDriverEntryPoint
+ UefiRuntimeServicesTableLib
+ CacheMaintenanceLib
+ PcdLib
+ DebugLib
+ BaseLib
+ SynchronizationLib
+ BaseMemoryLib
+ MtrrLib
+ SmmLib
+ IoLib
+ TimerLib
+ SmmServicesTableLib
+ MemoryAllocationLib
+ DebugAgentLib
+ HobLib
+ CpuConfigLib
+ PciLib
+ LocalApicLib
+ UefiCpuLib
+ SmmCpuPlatformHookLib
+ UefiLib
+ DxeServicesTableLib
+ CpuLib
+ ReportStatusCodeLib
+
+[Protocols]
+ ## CONSUMES
+ gEfiSmmAccess2ProtocolGuid
+
+ ## CONSUMES
+ gEfiMpServiceProtocolGuid
+
+ ## PRODUCES
+ gEfiSmmConfigurationProtocolGuid
+
+ ## PRODUCES
+ gEfiSmmCpuProtocolGuid
+
+ ## NOTIFY
+ gEfiSmmReadyToLockProtocolGuid
+
+ ## PRODUCES
+ gSmmCpuSyncProtocolGuid
+
+ ## PRODUCES
+ gSmmCpuSync2ProtocolGuid
+
+ ## PRODUCES
+ gEfiSmmCpuServiceProtocolGuid
+
+ ## PRODUCES
+ gEfiSmmCpuSaveStateProtocolGuid
+
+[Guids]
+ ## SOMETIMES_CONSUMES ## HOB
+ gEfiAcpiVariableGuid
+
+ ## SOMETIMES_CONSUMES ## SystemTable
+ gEfiAcpi20TableGuid
+
+ ## SOMETIMES_CONSUMES ## SystemTable
+ gEfiAcpi10TableGuid
+
+[FeaturePcd]
+ gEfiMdeModulePkgTokenSpaceGuid.PcdFrameworkCompatibilitySupport ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmBlockStartupThisAp ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmEnableBspElection ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmUncacheCpuSyncData ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuHotPlugSupport ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmStackGuard ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmProfileEnable ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmProfileRingBuffer ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmFeatureControlMsrLock ## CONSUMES
+
+[FixedPcd]
+ ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber
+
+ ## CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmProfileSize
+
+[Pcd]
+ ## SOMETIMES_CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmStackSize
+
+ ## SOMETIMES_CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmApSyncTimeout
+
+ ## SOMETIMES_CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuS3DataAddress
+
+ ## SOMETIMES_PRODUCES
+ gEfiCpuTokenSpaceGuid.PcdCpuHotPlugDataAddress
+
+ ## SOMETIMES_CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable
+
+ ## SOMETIMES_CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmUseDelayIndication
+
+ ## SOMETIMES_CONSUMES
+ gEfiCpuTokenSpaceGuid.PcdCpuSmmUseBlockIndication
+
+[Depex]
+ gEfiMpServiceProtocolGuid
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.c
new file mode 100644
index 0000000000..2441d7e7a0
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.c
@@ -0,0 +1,492 @@
+/** @file
+ The CPU specific programming for PiSmmCpuDxeSmm module, such as SMRR, EMRR, IED.
+ Currently only CherryView is supported (see mCpuClasstable below).
+
+ Copyright (c) 2010 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Base.h>
+
+#include <Library/PcdLib.h>
+#include <Library/BaseLib.h>
+#include <Library/CpuLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PciLib.h>
+#include <Library/LocalApicLib.h>
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmFeatures.h"
+
+//
+// The CPUID mapping for CherryView
+//
+
+CPUID_MAPPING mCherryViewMap[] = {
+ {CPUID_SIGNATURE_CHERRYVIEW, CPUID_MASK_NO_STEPPING},
+};
+
+//
+// The CLASS for CherryView
+//
+CPU_SMM_CLASS mCherryViewClass = {
+ CpuCherryView,
+ sizeof(mCherryViewMap)/sizeof(mCherryViewMap[0]),
+ mCherryViewMap,
+ };
+
+//
+// This table defines supported CPU class
+//
+CPU_SMM_CLASS *mCpuClasstable[] = {
+ &mCherryViewClass
+ };
+
+////////
+// Below section is common definition
+////////
+
+//
+// Assumes UP, or MP with identical feature set
+//
+CPU_SMM_FEATURE_CONTEXT mFeatureContext;
+
+CPU_SMM_CLASS *mThisCpu;
+BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
+BOOLEAN mSmmUseDelayIndication;
+BOOLEAN mSmmUseBlockIndication;
+
+/**
+ Return if SMRR is supported
+
+ @retval TRUE SMRR is supported.
+ @retval FALSE SMRR is not supported.
+
+**/
+BOOLEAN
+IsSmrrSupported (
+ VOID
+ )
+{
+ UINT64 MtrrCap;
+
+ MtrrCap = AsmReadMsr64(EFI_MSR_IA32_MTRR_CAP);
+ if ((MtrrCap & IA32_MTRR_SMRR_SUPPORT_BIT) == 0) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+}
+
+/**
+ Initialize SMRR in SMM relocate.
+
+ @param SmrrBase The base address of SMRR.
+ @param SmrrSize The size of SMRR.
+**/
+VOID
+InitSmrr (
+ IN UINT32 SmrrBase,
+ IN UINT32 SmrrSize
+ )
+{
+ AsmWriteMsr64 (EFI_MSR_SMRR_PHYS_BASE, SmrrBase | CACHE_WRITE_BACK);
+ AsmWriteMsr64 (EFI_MSR_SMRR_PHYS_MASK, (~(SmrrSize - 1) & EFI_MSR_SMRR_MASK)); // The valid bit is set in ConfigSmrr() at the first SMI
+}
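+
+//
+// Worked example (illustrative values): for SmrrBase = 0x7F800000 and
+// SmrrSize = 0x00800000 (8MB), ~(SmrrSize - 1) & EFI_MSR_SMRR_MASK is
+// 0xFF800000, so PHYS_BASE is programmed to 0x7F800006 (write-back type)
+// and PHYS_MASK to 0xFF800000, with the valid bit still clear.
+//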
+
+/**
+ Configure SMRR register at each SMM entry.
+**/
+VOID
+ConfigSmrr (
+ VOID
+ )
+{
+ UINT64 SmrrMask;
+
+ SmrrMask = AsmReadMsr64 (EFI_MSR_SMRR_PHYS_MASK);
+ if ((SmrrMask & EFI_MSR_SMRR_PHYS_MASK_VALID) == 0) {
+ AsmWriteMsr64(EFI_MSR_SMRR_PHYS_MASK, SmrrMask | EFI_MSR_SMRR_PHYS_MASK_VALID);
+ }
+}
+
+////////
+// Below section is definition for the supported class
+////////
+
+/**
+ This function returns the current CPU_SMM_CLASS according to the CPUID mapping.
+
+ @return Pointer to the current CPU_SMM_CLASS
+
+**/
+CPU_SMM_CLASS *
+GetCpuFamily (
+ VOID
+ )
+{
+ UINT32 ClassIndex;
+ UINT32 Index;
+ UINT32 Count;
+ CPUID_MAPPING *CpuMapping;
+ UINT32 RegEax;
+
+ AsmCpuid (EFI_CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
+ for (ClassIndex = 0; ClassIndex < sizeof(mCpuClasstable)/sizeof(mCpuClasstable[0]); ClassIndex++) {
+ CpuMapping = mCpuClasstable[ClassIndex]->MappingTable;
+ Count = mCpuClasstable[ClassIndex]->MappingCount;
+ for (Index = 0; Index < Count; Index++) {
+ if ((CpuMapping[Index].Signature & CpuMapping[Index].Mask) == (RegEax & CpuMapping[Index].Mask)) {
+ return mCpuClasstable[ClassIndex];
+ }
+ }
+ }
+
+ // Not found. This should not happen.
+ ASSERT (FALSE);
+ return NULL;
+}
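+
+//
+// Worked example (illustrative): with CPUID.1:EAX = 0x000406C3 (a CherryView
+// stepping), 0x000406C3 & CPUID_MASK_NO_STEPPING = 0x000406C0, which equals
+// CPUID_SIGNATURE_CHERRYVIEW & CPUID_MASK_NO_STEPPING, so mCherryViewClass
+// is returned.
+//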
+
+////////
+// Below section is external function
+////////
+/**
+ Read an MSR or CSR register, selected by RegName, based on the CPU type.
+
+ NOTE: Since the platform may use I/O ports 0xCF8 and 0xCFC to access
+ CSRs, a SPIN_LOCK is needed to avoid collisions on an MP system.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RegName Register name.
+
+ @return 64-bit value read from register.
+
+**/
+UINT64
+SmmReadReg64 (
+ IN UINTN CpuIndex,
+ IN SMM_REG_NAME RegName
+ )
+{
+ UINT64 RetVal;
+
+ RetVal = 0;
+ switch (RegName) {
+ //
+ // Client uses MSR
+ //
+ case SmmRegFeatureControl:
+ RetVal = AsmReadMsr64 (EFI_MSR_HASWELL_SMM_FEATURE_CONTROL);
+ break;
+ case SmmRegSmmDelayed:
+ RetVal = AsmReadMsr64 (EFI_MSR_HASWELL_SMM_DELAYED);
+ break;
+ case SmmRegSmmBlocked:
+ RetVal = AsmReadMsr64 (EFI_MSR_HASWELL_SMM_BLOCKED);
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+ return RetVal;
+}
+
+/**
+ Write an MSR or CSR register, selected by RegName, based on the CPU type.
+
+ NOTE: Since the platform may use I/O ports 0xCF8 and 0xCFC to access
+ CSRs, a SPIN_LOCK is needed to avoid collisions on an MP system.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RegName Register name.
+ @param[in] RegValue 64-bit Register value.
+
+**/
+VOID
+SmmWriteReg64 (
+ IN UINTN CpuIndex,
+ IN SMM_REG_NAME RegName,
+ IN UINT64 RegValue
+ )
+{
+ switch (RegName) {
+ //
+ // Client uses MSR
+ //
+ case SmmRegFeatureControl:
+ AsmWriteMsr64 (EFI_MSR_HASWELL_SMM_FEATURE_CONTROL, RegValue);
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+}
+
+/**
+ This function returns the logical processor index within the package.
+
+ @param[in] ProcessorNumber The processor number.
+ @param[out] LogProcIndexPackage The logical processor index.
+
+ @retval EFI_NOT_FOUND Cannot find the specified processor by ProcessorNumber.
+ @retval EFI_SUCCESS The logical processor index is returned in LogProcIndexPackage.
+
+**/
+EFI_STATUS
+GetLogProcIndexInPackage (
+ IN UINTN ProcessorNumber,
+ OUT UINT16 *LogProcIndexPackage
+ )
+{
+ UINT64 ProcessorId;
+ UINT32 PackageId;
+ UINTN Index;
+ UINT16 LogProcIndex;
+
+ ProcessorId = gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId;
+ if (ProcessorId == INVALID_APIC_ID) {
+ return EFI_NOT_FOUND;
+ }
+
+ PackageId = gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].Location.Package;
+ LogProcIndex = 0;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].Location.Package == PackageId) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId < ProcessorId) {
+ //
+ // Count logical processors with smaller APIC IDs in the same package
+ //
+ LogProcIndex++;
+ }
+ }
+ }
+
+ *LogProcIndexPackage = LogProcIndex;
+ return EFI_SUCCESS;
+}
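+
+//
+// Worked example (illustrative): if package 0 holds processors with APIC IDs
+// {0, 2, 4, 6} and the target processor's APIC ID is 4, two processors in the
+// package have smaller IDs, so *LogProcIndexPackage is set to 2.
+//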
+
+/**
+ Return if it is needed to configure MTRR to set TSEG cacheability.
+
+ @retval TRUE MTRRs need to be configured
+ @retval FALSE MTRRs do not need to be configured
+**/
+BOOLEAN
+NeedConfigureMtrrs (
+ VOID
+ )
+{
+ ASSERT (mThisCpu != NULL);
+
+ switch (mThisCpu->Family) {
+ case CpuCherryView:
+ return FALSE;
+ default:
+ return TRUE;
+ }
+}
+
+/**
+ Processor specific hook point at each SMM entry.
+
+ @param CpuIndex The index of the CPU to check.
+
+**/
+VOID
+SmmRendezvousEntry (
+ IN UINTN CpuIndex
+ )
+{
+
+ ASSERT (mThisCpu != NULL);
+
+ switch (mThisCpu->Family) {
+ case CpuCherryView:
+ if (mFeatureContext.SmrrEnabled) {
+ ConfigSmrr ();
+ }
+ return;
+ default:
+ return ;
+ }
+}
+
+/**
+ Processor specific hook point at each SMM exit.
+
+ @param CpuIndex The index of the CPU to check.
+**/
+VOID
+SmmRendezvousExit (
+ IN UINTN CpuIndex
+ )
+{
+
+ ASSERT (mThisCpu != NULL);
+
+ switch (mThisCpu->Family) {
+ case CpuCherryView:
+ default:
+ return ;
+ }
+}
+
+/**
+ Initialize SMRR context in SMM Init.
+**/
+VOID
+InitializeSmmMtrrManager (
+ VOID
+ )
+{
+ mThisCpu = GetCpuFamily ();
+ ASSERT (mThisCpu != NULL);
+
+ switch (mThisCpu->Family) {
+ case CpuCherryView:
+ if (!IsSmrrSupported ()) {
+ return ;
+ }
+ mFeatureContext.SmrrEnabled = TRUE;
+ return ;
+ default:
+ return ;
+ }
+}
+
+/**
+ Initialize SMRR/SMBASE/SMM Sync features in SMM Relocate.
+
+ @param ProcessorNumber The processor number
+ @param SmrrBase The base address of SMRR.
+ @param SmrrSize The size of SMRR.
+ @param SmBase The SMBASE value.
+ @param IsBsp Indicates whether this processor is treated as the BSP.
+**/
+VOID
+SmmInitiFeatures (
+ IN UINTN ProcessorNumber,
+ IN UINT32 SmrrBase,
+ IN UINT32 SmrrSize,
+ IN UINT32 SmBase,
+ IN BOOLEAN IsBsp
+ )
+{
+ SOCKET_LGA_775_SMM_CPU_STATE *CpuState;
+ SMM_CPU_SYNC_FEATURE *SyncFeature;
+
+ SyncFeature = &(gSmmCpuPrivate->SmmSyncFeature[ProcessorNumber]);
+ SyncFeature->DelayIndicationSupported = FALSE;
+ SyncFeature->BlockIndicationSupported = FALSE;
+ SyncFeature->HaswellLogProcEnBit = (UINT64)(INT64)(-1);
+
+ mThisCpu = GetCpuFamily ();
+ ASSERT (mThisCpu != NULL);
+
+ //
+ // Configure SMBASE.
+ //
+ switch (mThisCpu->Family) {
+ case CpuCherryView:
+ //
+ // Fall back to legacy SMBASE setup.
+ //
+ CpuState = (SOCKET_LGA_775_SMM_CPU_STATE *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_CPU_STATE_OFFSET);
+ CpuState->x86.SMBASE = SmBase;
+ break ;
+ default:
+ return ;
+ }
+
+ switch (mThisCpu->Family) {
+ case CpuCherryView:
+ if (IsSmrrSupported ()) {
+ InitSmrr (SmrrBase, SmrrSize);
+ }
+ return ;
+ default:
+ ASSERT (FALSE);
+ return ;
+ }
+}
+
+/**
+ Configure SMM Code Access Check feature on an AP.
+ SMM Feature Control MSR will be locked after configuration.
+
+ @param[in,out] Buffer Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+ConfigSmmCodeAccessCheckOnCurrentProcessor (
+ IN OUT VOID *Buffer
+ )
+{
+ UINT64 SmmFeatureControlMsr;
+ UINTN CpuIndex;
+
+ CpuIndex = *(UINTN *)Buffer;
+
+ SmmFeatureControlMsr = SmmReadReg64 (CpuIndex, SmmRegFeatureControl);
+ //
+ // The SMM Feature Control MSR is package scope. If lock bit is set, don't set it again.
+ //
+ if ((SmmFeatureControlMsr & SMM_FEATURE_CONTROL_LOCK_BIT) == 0) {
+ if (mSmmCodeAccessCheckEnable) {
+ SmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
+ }
+ if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ SmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
+ }
+ SmmWriteReg64 (CpuIndex, SmmRegFeatureControl, SmmFeatureControlMsr);
+ }
+}
+
+/**
+ Configure SMM Code Access Check feature for all processors.
+ SMM Feature Control MSR will be locked after configuration.
+**/
+VOID
+ConfigSmmCodeAccessCheck (
+ VOID
+ )
+{
+ UINTN Index;
+ EFI_STATUS Status;
+
+ //
+ // SMM Code Access Check feature is supported since Haswell.
+ //
+ if (FALSE /*mThisCpu->Family == CpuHaswell*/) {
+ if ((AsmReadMsr64 (EFI_MSR_HASWELL_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
+ mSmmCodeAccessCheckEnable = FALSE;
+ if (!FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ return;
+ }
+ }
+ //
+ // Enable SMM Code Access Check feature for the BSP.
+ //
+ ConfigSmmCodeAccessCheckOnCurrentProcessor (NULL);
+ //
+ // Enable SMM Code Access Check feature for the APs.
+ //
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ //
+ // Don't call gSmst->SmmStartupThisAp() because it may be implemented in a blocking or non-blocking fashion.
+ //
+ Status = SmmBlockingStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, NULL);
+ ASSERT_EFI_ERROR (Status);
+ }
+ }
+ }
+}
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.h
new file mode 100644
index 0000000000..2eabc82e50
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmFeatures.h
@@ -0,0 +1,220 @@
+/** @file
+ The CPU specific programming for PiSmmCpuDxeSmm module.
+
+ Copyright (c) 2010 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __SMM_FEATURES_H__
+#define __SMM_FEATURES_H__
+
+////////
+// Below definition is from IA32 SDM
+////////
+#define EFI_CPUID_VERSION_INFO 0x1
+#define EFI_CPUID_CORE_TOPOLOGY 0x0B
+#define EFI_CPUID_EXTENDED_FUNCTION 0x80000000
+#define EFI_CPUID_VIR_PHY_ADDRESS_SIZE 0x80000008
+
+#define EFI_MSR_IA32_MTRR_CAP 0xFE
+#define IA32_MTRR_SMRR_SUPPORT_BIT BIT11
+
+#define EFI_MSR_IA32_FEATURE_CONTROL 0x3A
+#define IA32_SMRR_ENABLE_BIT BIT3
+
+#define EFI_MSR_SMRR_PHYS_MASK_VALID BIT11
+#define EFI_MSR_SMRR_MASK 0xFFFFF000
+
+#define EFI_MSR_SMRR_PHYS_BASE 0x1F2
+#define EFI_MSR_SMRR_PHYS_MASK 0x1F3
+#define CACHE_WRITE_PROTECT 5
+#define CACHE_WRITE_BACK 6
+#define SMM_DEFAULT_SMBASE 0x30000
+
+#define EFI_MSR_HASWELL_SMM_MCA_CAP 0x17D
+#define SMM_CODE_ACCESS_CHK_BIT BIT58
+#define LONG_FLOW_INDICATION_BIT BIT59
+
+#define EFI_MSR_HASWELL_SMM_FEATURE_CONTROL 0x4E0
+#define SMM_FEATURE_CONTROL_LOCK_BIT BIT0
+#define SMM_CODE_CHK_EN_BIT BIT2
+
+#define SMM_HASWELL_CLIENT_LOG_PROC_EN_BIT_LENGTH 12
+
+#define EFI_MSR_HASWELL_SMM_DELAYED 0x4E2
+
+#define EFI_MSR_HASWELL_SMM_BLOCKED 0x4E3
+
+////////
+// Below section is definition for CPU SMM Feature context
+////////
+
+//
+// Structure to describe CPU identification mapping
+// if ((CPUID_EAX(1) & Mask) == (Signature & Mask)), the entry matches.
+//
+typedef struct {
+ UINT32 Signature;
+ UINT32 Mask;
+} CPUID_MAPPING;
+
+//
+// CPU SMM family
+//
+typedef enum {
+ CpuCherryView,
+ CpuSmmFamilyMax
+} CPU_SMM_FAMILY;
+
+//
+// Structure to describe CPU SMM class
+//
+typedef struct {
+ CPU_SMM_FAMILY Family;
+ UINT32 MappingCount;
+ CPUID_MAPPING *MappingTable;
+} CPU_SMM_CLASS;
+
+//
+// Structure to describe CPU_SMM_FEATURE_CONTEXT
+//
+typedef struct {
+ BOOLEAN SmrrEnabled;
+} CPU_SMM_FEATURE_CONTEXT;
+
+//
+// ATOM CPUID signatures
+//
+#define CPUID_SIGNATURE_CHERRYVIEW 0x000406C0
+
+//
+// CPUID masks
+//
+#define CPUID_MASK_NO_STEPPING 0x0FFF0FF0
+#define CPUID_MASK_NO_STEPPING_MODEL 0x0FFF0F00
+
+//
+// Enumerate registers which differ between client and server
+//
+typedef enum {
+ SmmRegFeatureControl,
+ SmmRegSmmDelayed,
+ SmmRegSmmBlocked
+} SMM_REG_NAME;
+
+extern BOOLEAN mSmmCodeAccessCheckEnable;
+extern BOOLEAN mSmmUseDelayIndication;
+extern BOOLEAN mSmmUseBlockIndication;
+extern CPU_SMM_CLASS *mThisCpu;
+
+/**
+ Return if it is needed to configure MTRR to set TSEG cacheability.
+
+ @retval TRUE MTRRs need to be configured
+ @retval FALSE MTRRs do not need to be configured
+**/
+BOOLEAN
+NeedConfigureMtrrs (
+ VOID
+ );
+
+/**
+ Processor specific hook point at each SMM entry.
+
+ @param CpuIndex The index of the CPU to check.
+**/
+VOID
+SmmRendezvousEntry (
+ IN UINTN CpuIndex
+ );
+
+/**
+ Processor specific hook point at each SMM exit.
+
+ @param CpuIndex The index of the CPU to check.
+**/
+VOID
+SmmRendezvousExit (
+ IN UINTN CpuIndex
+ );
+
+/**
+ Initialize SMRR context in SMM Init.
+**/
+VOID
+InitializeSmmMtrrManager (
+ VOID
+ );
+
+/**
+ Initialize SMRR/SMBASE/SMM Sync features in SMM Relocate.
+
+ @param ProcessorNumber The processor number
+ @param SmrrBase The base address of SMRR.
+ @param SmrrSize The size of SMRR.
+ @param SmBase The SMBASE value.
+ @param IsBsp Indicates whether this processor is treated as the BSP.
+**/
+VOID
+SmmInitiFeatures (
+ IN UINTN ProcessorNumber,
+ IN UINT32 SmrrBase,
+ IN UINT32 SmrrSize,
+ IN UINT32 SmBase,
+ IN BOOLEAN IsBsp
+ );
+
+/**
+ Configure SMM Code Access Check feature for all processors.
+ SMM Feature Control MSR will be locked after configuration.
+**/
+VOID
+ConfigSmmCodeAccessCheck (
+ VOID
+ );
+
+/**
+ Read an MSR or CSR register, selected by RegName, based on the CPU type.
+
+ NOTE: Since the platform may use I/O ports 0xCF8 and 0xCFC to access
+ CSRs, a SPIN_LOCK is needed to avoid collisions on an MP system.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RegName Register name.
+
+ @return 64-bit value read from register.
+
+**/
+UINT64
+SmmReadReg64 (
+ IN UINTN CpuIndex,
+ IN SMM_REG_NAME RegName
+ );
+
+/**
+ Write an MSR or CSR register, selected by RegName, based on the CPU type.
+
+ NOTE: Since the platform may use I/O ports 0xCF8 and 0xCFC to access
+ CSRs, a SPIN_LOCK is needed to avoid collisions on an MP system.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RegName Register name.
+ @param[in] RegValue 64-bit Register value.
+
+**/
+VOID
+SmmWriteReg64 (
+ IN UINTN CpuIndex,
+ IN SMM_REG_NAME RegName,
+ IN UINT64 RegValue
+ );
+
+#endif
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.c
new file mode 100644
index 0000000000..b781d2032b
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.c
@@ -0,0 +1,1369 @@
+/** @file
+ Enable SMM profile.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+#define MSR_IA32_MISC_ENABLE 0x1A0
+
+SMM_PROFILE_HEADER *mSmmProfileBase;
+MSR_DS_AREA_STRUCT *mMsrDsAreaBase;
+//
+// The size of the buffer that stores SMM profile data.
+//
+UINTN mSmmProfileSize = FixedPcdGet32 (PcdCpuSmmProfileSize);
+
+//
+// The size of the buffer used to enable branch trace store.
+//
+UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;
+
+//
+// The flag indicates if execute-disable is supported by processor.
+//
+BOOLEAN mXdSupported = FALSE;
+
+//
+// The flag indicates if BTS is supported by processor.
+//
+BOOLEAN mBtsSupported = FALSE;
+
+//
+// The flag indicates if SMM profile starts to record data.
+//
+BOOLEAN mSmmProfileStart = FALSE;
+
+//
+// Record the page fault exception count for one instruction execution.
+//
+UINTN mPFEntryCount[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+
+UINT64 mLastPFEntryValue[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)][MAX_PF_ENTRY_COUNT];
+UINT64 *mLastPFEntryPointer[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)][MAX_PF_ENTRY_COUNT];
+
+MSR_DS_AREA_STRUCT *mMsrDsArea[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+BRANCH_TRACE_RECORD *mMsrBTSRecord[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+UINTN mBTSRecordNumber;
+PEBS_RECORD *mMsrPEBSRecord[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+
+//
+// These memory ranges are always present. They do not generate access-type page fault exceptions,
+// but they may generate instruction-fetch-type page fault exceptions.
+//
+MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;
+UINTN mProtectionMemRangeCount = 0;
+
+//
+// Some pre-defined memory ranges.
+//
+MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
+ //
+ // SMRAM range (to be fixed in runtime).
+ // It is always present and instruction fetches are allowed.
+ //
+ {{0x00000000, 0x00000000},TRUE,FALSE},
+
+ //
+ // SMM profile data range( to be fixed in runtime).
+ // It is always present and instruction fetches are not allowed.
+ //
+ {{0x00000000, 0x00000000},TRUE,TRUE},
+
+ //
+ // Future extended ranges could be added here.
+ //
+
+ //
+ // PCI MMIO ranges (to be added in runtime).
+ // They are always present and instruction fetches are not allowed.
+ //
+};
+
+//
+// These memory ranges are mapped by 4KB-page instead of 2MB-page.
+//
+MEMORY_RANGE *mSplitMemRange = NULL;
+UINTN mSplitMemRangeCount = 0;
+
+//
+// SMI command port.
+//
+UINT32 mSmiCommandPort;
+
+/**
+ Disable branch trace store.
+
+**/
+VOID
+DisableBTS (
+ VOID
+ )
+{
+ AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
+}
+
+/**
+ Enable branch trace store.
+
+**/
+VOID
+EnableBTS (
+ VOID
+ )
+{
+ AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
+}
+
+/**
+ Get CPU Index from Apic ID.
+
+**/
+UINTN
+GetCpuIndex (
+ VOID
+ )
+{
+ UINTN Index;
+ UINT32 ApicId;
+
+ ApicId = GetApicId ();
+
+ for (Index = 0; Index < FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
+ return Index;
+ }
+ }
+ ASSERT (FALSE);
+ return 0;
+}
+
+/**
+ Get the source IP (branch origin) after an execute-disable exception is triggered.
+
+ @param CpuIndex The index of the CPU.
+ @param DestinationIP The destination address of the faulting branch.
+
+ @return The source address of the faulting branch, or 0 if it is not found.
+
+**/
+UINT64
+GetSourceFromDestinationOnBts (
+ UINTN CpuIndex,
+ UINT64 DestinationIP
+ )
+{
+ BRANCH_TRACE_RECORD *CurrentBTSRecord;
+ UINTN Index;
+ BOOLEAN FirstMatch;
+
+ FirstMatch = FALSE;
+
+ CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
+ for (Index = 0; Index < mBTSRecordNumber; Index++) {
+ if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
+ //
+ // Underflow
+ //
+ CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
+ CurrentBTSRecord --;
+ }
+ if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
+ //
+ // Found a match: the first match is the DEBUG exception branch; keep looking for the second.
+ //
+ if (!FirstMatch) {
+ //
+ // The first one is DEBUG exception
+ //
+ FirstMatch = TRUE;
+ } else {
+ //
+ // Found the proper record.
+ //
+ return CurrentBTSRecord->LastBranchFrom;
+ }
+ }
+ CurrentBTSRecord--;
+ }
+
+ return 0;
+}
+
+/**
+ SMM profile specific INT 1 (single-step) exception handler.
+
+ @param InterruptType Defines the type of interrupt or exception that
+ occurred on the processor.This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+DebugExceptionHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN CpuIndex;
+ UINTN PFEntry;
+
+ if (!mSmmProfileStart) {
+ return;
+ }
+ CpuIndex = GetCpuIndex ();
+
+ //
+ // Clear last PF entries
+ //
+ for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
+ *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
+ }
+
+ //
+ // Reset page fault exception count for next page fault.
+ //
+ mPFEntryCount[CpuIndex] = 0;
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+}
+
+/**
+ Check if the memory address is covered by a protected memory range, and return whether the
+ range is present and execute-disabled.
+
+ @param Address The memory address to check.
+ @param Nx On return, the flag indicating if the memory is execute-disable.
+
+**/
+BOOLEAN
+IsAddressValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ OUT BOOLEAN *Nx
+ )
+{
+ UINTN Index;
+
+ //
+ // Check config
+ //
+ for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
+ if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
+ *Nx = mProtectionMemRange[Index].Nx;
+ return mProtectionMemRange[Index].Present;
+ }
+ }
+
+ //
+ // Return default
+ //
+ *Nx = FALSE;
+ return FALSE;
+}
+
+/**
+ Check if the memory address will be mapped by 4KB-page.
+
+ @param Address The address of Memory.
+
+**/
+BOOLEAN
+IsAddressSplit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINTN Index;
+
+ //
+ // Check config
+ //
+ for (Index = 0; Index < mSplitMemRangeCount; Index++) {
+ if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
+ return TRUE;
+ }
+ }
+
+ //
+ // Return default
+ //
+ return FALSE;
+}
+
+/**
+ Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
+
+**/
+VOID
+InitProtectedMemRange (
+ VOID
+ )
+{
+ UINTN Index;
+ UINTN NumberOfDescriptors;
+ UINTN NumberOfMmioDescriptors;
+ UINTN NumberOfProtectRange;
+ UINTN NumberOfSpliteRange;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+ UINTN TotalSize;
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS ProtectBaseAddress;
+ EFI_PHYSICAL_ADDRESS ProtectEndAddress;
+ EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress;
+ EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress;
+ UINT64 High4KBPageSize;
+ UINT64 Low4KBPageSize;
+
+ NumberOfDescriptors = 0;
+ NumberOfMmioDescriptors = 0;
+ NumberOfSpliteRange = 0;
+ MemorySpaceMap = NULL;
+
+ //
+ // Get MMIO ranges from GCD and add them into protected memory ranges.
+ //
+ Status = gDS->GetMemorySpaceMap (
+ &NumberOfDescriptors,
+ &MemorySpaceMap
+ );
+ ASSERT_EFI_ERROR (Status);
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
+ NumberOfMmioDescriptors++;
+ }
+ }
+
+ if (NumberOfMmioDescriptors != 0) {
+ TotalSize = NumberOfMmioDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
+ mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
+ ASSERT (mProtectionMemRange != NULL);
+ mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);
+
+ //
+ // Copy existing ranges.
+ //
+ CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));
+
+ //
+ // Create split ranges which come from protected ranges.
+ //
+ TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
+ mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
+ ASSERT (mSplitMemRange != NULL);
+
+ //
+ // Create MMIO ranges which are set to present and execution-disable.
+ //
+ NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
+ continue;
+ }
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
+ mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
+ NumberOfProtectRange++;
+ }
+ }
+
+ //
+ // According to the protected ranges, create the ranges which will be mapped by 4KB pages.
+ //
+ NumberOfSpliteRange = 0;
+ NumberOfProtectRange = mProtectionMemRangeCount;
+ for (Index = 0; Index < NumberOfProtectRange; Index++) {
+ //
+ // If the MMIO base or top address is not 2MB-aligned, align it to a 2MB boundary so 4KB pages can be created in the page table.
+ //
+ ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
+ ProtectEndAddress = mProtectionMemRange[Index].Range.Top;
+ if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
+ //
+ // Check if it is possible to create 4KB pages for the non-2MB-aligned portions and 2MB pages for the 2MB-aligned portion.
+ // A mix of 4KB and 2MB pages can save SMRAM space.
+ //
+ Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1);
+ Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
+ ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
+ //
+ // There is a range which can be mapped by 2MB pages.
+ //
+ High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
+ Low4KBPageSize = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
+ if (High4KBPageSize != 0) {
+ //
+ // Add not 2MB-aligned range to be mapped by 4KB-page.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ if (Low4KBPageSize != 0) {
+ //
+ // Add not 2MB-aligned range to be mapped by 4KB-page.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ } else {
+ //
+ // The range could only be mapped by 4KB-page.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ }
+ }
+
+ mSplitMemRangeCount = NumberOfSpliteRange;
+
+ DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
+ for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
+ DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
+ DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
+ }
+ for (Index = 0; Index < mSplitMemRangeCount; Index++) {
+ DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
+ DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
+ }
+}
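+
+//
+// Worked example (illustrative): a protected MMIO range of
+// [0x801FE000, 0x80400000) has a non-2MB-aligned base, so the loop above
+// records [0x80000000, 0x80200000) in mSplitMemRange to be remapped with
+// 4KB pages, while the 2MB-aligned remainder keeps its 2MB pages.
+//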
+
+/**
+ Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
+
+**/
+VOID
+InitPaging (
+ VOID
+ )
+{
+ UINT64 *Pml4;
+ UINT64 *Pde;
+ UINT64 *Pte;
+ UINT64 *Pt;
+ UINTN Address;
+ UINTN Level1;
+ UINTN Level2;
+ UINTN Level3;
+ UINTN Level4;
+ UINTN NumberOfPdpEntries;
+ UINTN NumberOfPml4Entries;
+ UINTN SizeOfMemorySpace;
+ BOOLEAN Nx;
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ Pml4 = (UINT64*)(UINTN)gSmiCr3;
+ SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
+ //
+ // Calculate the table entries of PML4E and PDPTE.
+ //
+ if (SizeOfMemorySpace <= 39 ) {
+ NumberOfPml4Entries = 1;
+ NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
+ } else {
+ NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
+ NumberOfPdpEntries = 512;
+ }
+ } else {
+ NumberOfPml4Entries = 1;
+ NumberOfPdpEntries = 4;
+ }
+
+ //
+ // Go through page table and change 2MB-page into 4KB-page.
+ //
+ for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);
+ } else {
+ Pde = (UINT64*)(UINTN)gSmiCr3;
+ }
+ for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
+ Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
+ if (Pte == 0) {
+ continue;
+ }
+ for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
+ Address = (((Level2 << 9) + Level3) << 21);
+
+ //
+ // if it is 2M page, check IsAddressSplit()
+ //
+ if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
+ //
+ // Based on current page table, create 4KB page table for split area.
+ //
+ ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));
+
+ Pt = AllocatePages (1);
+ ASSERT (Pt != NULL);
+
+ *Pte = (UINTN)Pt | IA32_PG_RW | IA32_PG_P;
+
+ // Split it
+ for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
+ *Pt = Address + ((Level4 << 12) | IA32_PG_RW | IA32_PG_P);
+ } // end for PT
+ } // end if IsAddressSplit
+ } // end for PTE
+ } // end for PDE
+ }
+
+ //
+ // Go through page table and set several page table entries to absent or execute-disable.
+ //
+ DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
+ for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK);
+ } else {
+ Pde = (UINT64*)(UINTN)gSmiCr3;
+ }
+ for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
+ Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
+ if (Pte == 0) {
+ continue;
+ }
+ for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
+ Address = (((Level2 << 9) + Level3) << 21);
+
+ if ((*Pte & IA32_PG_PS) != 0) {
+ // 2MB page
+
+ if (!IsAddressValid (Address, &Nx)) {
+ //
+ // Patch to remove present flag and rw flag
+ //
+ *Pte = *Pte & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+ }
+ if (Nx && mXdSupported) {
+ *Pte = *Pte | IA32_PG_NX;
+ }
+ } else {
+ // 4KB page
+ Pt = (UINT64 *)(UINTN)(*Pte & PHYSICAL_ADDRESS_MASK);
+ if (Pt == 0) {
+ continue;
+ }
+ for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
+ if (!IsAddressValid (Address, &Nx)) {
+ *Pt = *Pt & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+ }
+ if (Nx && mXdSupported) {
+ *Pt = *Pt | IA32_PG_NX;
+ }
+ Address += SIZE_4KB;
+ } // end for PT
+ } // end if PS
+ } // end for PTE
+ } // end for PDE
+ }
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+ DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
+
+ return ;
+}
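+
+//
+// Worked note (illustrative): in the loops above, the 2MB-page address is
+// reconstructed as Address = ((Level2 << 9) + Level3) << 21; for example,
+// Level2 = 1 and Level3 = 2 give (512 + 2) * 2MB = 0x40400000.
+//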
+
+/**
+ Find the FADT in the ACPI tables.
+
+ @param AcpiTableGuid The GUID used to find the ACPI table in the UEFI configuration table.
+
+ @return FADT table pointer, or NULL if not found.
+**/
+EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *
+FindAcpiFadtTableByAcpiGuid (
+ IN EFI_GUID *AcpiTableGuid
+ )
+{
+ EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER *Rsdp;
+ EFI_ACPI_DESCRIPTION_HEADER *Rsdt;
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
+ UINTN Index;
+ UINT32 Data32;
+ Rsdp = NULL;
+ Rsdt = NULL;
+ Fadt = NULL;
+ //
+ // Find the ACPI RSD_PTR table in the system configuration table
+ //
+ for (Index = 0; Index < gST->NumberOfTableEntries; Index++) {
+ if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), AcpiTableGuid)) {
+ //
+ // A match was found.
+ //
+ Rsdp = gST->ConfigurationTable[Index].VendorTable;
+ break;
+ }
+ }
+
+ if (Rsdp == NULL) {
+ return NULL;
+ }
+
+ Rsdt = (EFI_ACPI_DESCRIPTION_HEADER *)(UINTN) Rsdp->RsdtAddress;
+ if (Rsdt == NULL || Rsdt->Signature != EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE) {
+ return NULL;
+ }
+
+ for (Index = sizeof (EFI_ACPI_DESCRIPTION_HEADER); Index < Rsdt->Length; Index = Index + sizeof (UINT32)) {
+
+ Data32 = *(UINT32 *) ((UINT8 *) Rsdt + Index);
+ Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) (UINT32 *) (UINTN) Data32;
+ if (Fadt->Header.Signature == EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) {
+ break;
+ }
+ }
+
+ if (Fadt == NULL || Fadt->Header.Signature != EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) {
+ return NULL;
+ }
+
+ return Fadt;
+}
+
+/**
+ Find the FADT in the ACPI tables.
+
+ @return FADT table pointer, or NULL if not found.
+**/
+EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *
+FindAcpiFadtTable (
+ VOID
+ )
+{
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
+
+ Fadt = FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid);
+ if (Fadt != NULL) {
+ return Fadt;
+ }
+
+ return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid);
+}
+
+/**
+ Get the system port address of the SMI Command Port from the FADT table.
+
+**/
+VOID
+GetSmiCommandPort (
+ VOID
+ )
+{
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
+
+ Fadt = FindAcpiFadtTable ();
+ ASSERT (Fadt != NULL);
+
+ mSmiCommandPort = Fadt->SmiCmd;
+ DEBUG ((EFI_D_ERROR, "mSmiCommandPort = %x\n", mSmiCommandPort));
+}
+
+/**
+ Updates the page table to make some memory ranges (like system memory) absent
+ and make some memory ranges (like MMIO) present and execute-disabled. It also
+ updates 2MB pages to 4KB pages for some memory ranges.
+
+**/
+VOID
+SmmProfileStart (
+ VOID
+ )
+{
+ //
+ // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
+ //
+ InitPaging ();
+
+ //
+ // The flag indicates SMM profile starts to work.
+ //
+ mSmmProfileStart = TRUE;
+}
+
+/**
+ Initialize SMM profile in SmmReadyToLock protocol callback function.
+
+ @param Protocol Points to the protocol's unique identifier.
+ @param Interface Points to the interface instance.
+ @param Handle The handle on which the interface was installed.
+
+ @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
+**/
+EFI_STATUS
+EFIAPI
+InitSmmProfileCallBack (
+ IN CONST EFI_GUID *Protocol,
+ IN VOID *Interface,
+ IN EFI_HANDLE Handle
+ )
+{
+ EFI_STATUS Status;
+
+ //
+ // Save to variable so that SMM profile data can be found.
+ //
+ Status = gRT->SetVariable (
+ SMM_PROFILE_NAME,
+ &gEfiCallerIdGuid,
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
+ sizeof(mSmmProfileBase),
+ &mSmmProfileBase
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Get Software SMI from Fadt
+ //
+ GetSmiCommandPort ();
+
+ //
+ // Init protected mem range for patching page table later.
+ //
+ InitProtectedMemRange ();
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Initialize SMM profile data structures.
+
+**/
+VOID
+InitSmmProfileInternal (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Base;
+ VOID *Registration;
+ UINTN Index;
+ UINTN MsrDsAreaSizePerCpu;
+ UINTN TotalSize;
+
+ //
+ // Allocate memory for SmmProfile below 4GB.
+ // The base address
+ //
+ ASSERT ((mSmmProfileSize & 0xFFF) == 0);
+
+ if (mBtsSupported) {
+ TotalSize = mSmmProfileSize + mMsrDsAreaSize;
+ } else {
+ TotalSize = mSmmProfileSize;
+ }
+
+ Base = 0xFFFFFFFF;
+ Status = gBS->AllocatePages (
+ AllocateMaxAddress,
+ EfiReservedMemoryType,
+ EFI_SIZE_TO_PAGES (TotalSize),
+ &Base
+ );
+ ASSERT_EFI_ERROR (Status);
+ ZeroMem ((VOID *)(UINTN)Base, TotalSize);
+ mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;
+
+ //
+ // Initialize SMM profile data header.
+ //
+ mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER);
+ mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
+ mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
+ mSmmProfileBase->CurDataEntries = 0;
+ mSmmProfileBase->CurDataSize = 0;
+ mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase;
+ mSmmProfileBase->TsegSize = mCpuHotPlugData.SmrrSize;
+ mSmmProfileBase->NumSmis = 0;
+ mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+
+ if (mBtsSupported) {
+ mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
+ MsrDsAreaSizePerCpu = mMsrDsAreaSize / FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber);
+ mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
+ for (Index = 0; Index < FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) {
+ mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
+ mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
+ mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);
+
+ mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];
+ mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;
+ mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
+ mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;
+
+ mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index];
+ mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase;
+ mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
+ mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
+ }
+ }
+
+ mProtectionMemRange = mProtectionMemRangeTemplate;
+ mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
+
+ //
+ // Update TSeg entry.
+ //
+ mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
+ mProtectionMemRange[0].Range.Top = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;
+
+ //
+ // Update SMM profile entry.
+ //
+ mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
+ mProtectionMemRange[1].Range.Top = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;
+
+ //
+ // Allocate memory reserved for creating 4KB pages.
+ //
+ InitPagesForPFHandler ();
+
+ //
+ // Start SMM profile when SmmReadyToLock protocol is installed.
+ //
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmReadyToLockProtocolGuid,
+ InitSmmProfileCallBack,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ return ;
+}
+
+/**
+ Check if XD feature is supported by a processor.
+
+**/
+VOID
+CheckFeatureSupported (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT32 RegEdx;
+
+ if (mXdSupported) {
+ AsmCpuid (EFI_CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax <= EFI_CPUID_EXTENDED_FUNCTION) {
+ //
+ // Extended CPUID functions are not supported on this processor.
+ //
+ mXdSupported = FALSE;
+ }
+
+ AsmCpuid (EFI_CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
+ //
+ // Execute Disable Bit feature is not supported on this processor.
+ //
+ mXdSupported = FALSE;
+ }
+ }
+
+ if (mBtsSupported) {
+ AsmCpuid (EFI_CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
+ //
+ // Per IA32 manuals:
+ // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
+ // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
+ // availability of the BTS facilities, including the ability to set the BTS and
+ // BTINT bits in the MSR_DEBUGCTLA MSR.
+ // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
+ //
+ if ((AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 11, 11) != 0) ||
+ (AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 12, 12) != 0)) {
+ //
+ // BTS facilities are not usable when the BTS_UNAVAILABLE (bit 11) or
+ // PEBS_UNAVAILABLE (bit 12) flag is set in IA32_MISC_ENABLE.
+ //
+ mBtsSupported = FALSE;
+ }
+ }
+ }
+}
+
+/**
+ Check if XD and BTS features are supported by all processors.
+
+ @param MpServices The MP services protocol.
+
+**/
+VOID
+CheckProcessorFeature (
+ IN EFI_MP_SERVICES_PROTOCOL *MpServices
+ )
+{
+ mXdSupported = TRUE;
+ mBtsSupported = TRUE;
+
+ //
+ // Check if XD and BTS are supported on the BSP
+ //
+ CheckFeatureSupported ();
+
+ //
+ // Check the other processors only if the BSP supports these features
+ //
+ if (mXdSupported || mBtsSupported) {
+ MpServices->StartupAllAPs (
+ MpServices,
+ (EFI_AP_PROCEDURE) CheckFeatureSupported,
+ TRUE,
+ NULL,
+ 0,
+ NULL,
+ NULL
+ );
+ }
+}
+
+/**
+ Enable XD feature.
+
+**/
+VOID
+ActivateXd (
+ VOID
+ )
+{
+ UINT64 MsrRegisters;
+
+ MsrRegisters = AsmReadMsr64 (MSR_EFER);
+ if ((MsrRegisters & MSR_EFER_XD) != 0) {
+ return ;
+ }
+ MsrRegisters |= MSR_EFER_XD;
+ AsmWriteMsr64 (MSR_EFER, MsrRegisters);
+}
+
+/**
+ Enable single step.
+
+**/
+VOID
+ActivateSingleStepDB (
+ VOID
+ )
+{
+ UINTN Dr6;
+
+ Dr6 = AsmReadDr6 ();
+ if ((Dr6 & DR6_SINGLE_STEP) != 0) {
+ return;
+ }
+ Dr6 |= DR6_SINGLE_STEP;
+ AsmWriteDr6 (Dr6);
+}
+
+/**
+ Enable last branch.
+
+**/
+VOID
+ActivateLBR (
+ VOID
+ )
+{
+ UINT64 DebugCtl;
+
+ DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
+ if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
+ return ;
+ }
+ AsmWriteMsr64 (MSR_LER_FROM_LIP, 0);
+ AsmWriteMsr64 (MSR_LER_TO_LIP, 0);
+ DebugCtl |= MSR_DEBUG_CTL_LBR;
+ AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
+}
+
+/**
+ Enable branch trace store.
+
+ @param CpuIndex The index of the processor.
+
+**/
+VOID
+ActivateBTS (
+ IN UINTN CpuIndex
+ )
+{
+ UINT64 DebugCtl;
+
+ DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
+ if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
+ return ;
+ }
+
+ AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
+ DebugCtl |= (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
+ DebugCtl &= ~MSR_DEBUG_CTL_BTINT;
+ AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
+}
+
+/**
+ Increase SMI number in each SMI entry.
+
+**/
+VOID
+SmmProfileRecordSmiNum (
+ VOID
+ )
+{
+ if (mSmmProfileStart) {
+ mSmmProfileBase->NumSmis++;
+ }
+}
+
+/**
+ Initialize processor environment for SMM profile.
+
+ @param CpuIndex The index of the processor.
+
+**/
+VOID
+ActivateSmmProfile (
+ IN UINTN CpuIndex
+ )
+{
+ //
+ // Try to enable NX
+ //
+ if (mXdSupported) {
+ ActivateXd ();
+ }
+
+ //
+ // Enable Single Step DB#
+ //
+ ActivateSingleStepDB ();
+
+ if (mBtsSupported) {
+ //
+ // We can not get useful information from LER, so we have to use BTS.
+ //
+ ActivateLBR ();
+
+ //
+ // Enable BTS
+ //
+ ActivateBTS (CpuIndex);
+ }
+}
+
+/**
+ Initialize SMM profile in SMM CPU entrypoint.
+
+**/
+VOID
+InitSmmProfile (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ EFI_MP_SERVICES_PROTOCOL *MpServices;
+
+ Status = gBS->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // First detect if XD and BTS are supported
+ //
+ CheckProcessorFeature (MpServices);
+
+ //
+ // Init SmmProfile here
+ //
+ InitSmmProfileInternal ();
+
+ //
+ // Init profile IDT.
+ //
+ InitIdtr ();
+
+ //
+ // Patch SmmS3ResumeState->SmmS3Cr3
+ //
+ InitSmmS3Cr3 ();
+}
+
+/**
+ Update the page table to map the memory correctly so that the instruction
+ which caused the page fault can execute successfully. It also saves the original page
+ table entries so they can be restored in the single-step exception handler.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+RestorePageTableBelow4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode
+ )
+{
+ UINTN PTIndex;
+ UINTN PFIndex;
+
+ //
+ // PML4
+ //
+ if (sizeof(UINT64) == sizeof(UINTN)) {
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ }
+
+ //
+ // PDPTE
+ //
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+
+ //
+ // PD
+ //
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ //
+ // Large page
+ //
+
+ //
+ // Record the old entries, which currently have non-present status.
+ // They cover the memory the instruction resides in and the memory the instruction accesses.
+ //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+ // Set new entry
+ //
+ PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
+ PageTable[PTIndex] |= IA32_PG_PS;
+ PageTable[PTIndex] |= IA32_PG_RW | IA32_PG_P;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+ } else {
+ //
+ // Small page
+ //
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+
+ //
+ // 4K PTE
+ //
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
+
+ //
+ // Record the old entries, which currently have non-present status.
+ // They cover the memory the instruction resides in and the memory the instruction accesses.
+ //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+ // Set new entry
+ //
+ PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
+ PageTable[PTIndex] |= IA32_PG_RW | IA32_PG_P;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+ }
+}
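+
+//
+// Worked example (illustrative values, not from the original source): for a
+// faulting address PFAddress = 0x12345678, the walk above reads PDPT index
+// BitFieldRead64 (PFAddress, 30, 38) = 0 and PD index
+// BitFieldRead64 (PFAddress, 21, 29) = 0x91; for a large page the new entry maps
+// the 2MB-aligned frame PFAddress & ~((1ull << 21) - 1) = 0x12200000.
+//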
+
+/**
+ The Page fault handler to save SMM profile data.
+
+ @param Rip The RIP when exception happens.
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+SmmProfilePFHandler (
+ UINTN Rip,
+ UINTN ErrorCode
+ )
+{
+ UINT64 *PageTable;
+ UINT64 PFAddress;
+ UINTN CpuIndex;
+ UINTN Index;
+ UINT64 InstructionAddress;
+ UINTN MaxEntryNumber;
+ UINTN CurrentEntryNumber;
+ BOOLEAN IsValidPFAddress;
+ SMM_PROFILE_ENTRY *SmmProfileEntry;
+ UINT64 SmiCommand;
+ EFI_STATUS Status;
+ UINTN SwSmiCpuIndex;
+ UINT8 SoftSmiValue;
+ EFI_SMM_SAVE_STATE_IO_INFO IoInfo;
+
+ if (!mSmmProfileStart) {
+ //
+ // If SMM profile has not started, call the original page fault handler.
+ //
+ SmiDefaultPFHandler ();
+ return;
+ }
+
+ if (mBtsSupported) {
+ DisableBTS ();
+ }
+
+ IsValidPFAddress = FALSE;
+ PageTable = (UINT64 *)AsmReadCr3 ();
+ PFAddress = AsmReadCr2 ();
+ CpuIndex = GetCpuIndex ();
+
+ if (PFAddress <= 0xFFFFFFFF) {
+ RestorePageTableBelow4G (PageTable, PFAddress, CpuIndex, ErrorCode);
+ } else {
+ RestorePageTableAbove4G (PageTable, PFAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
+ }
+
+ if (!IsValidPFAddress) {
+ InstructionAddress = Rip;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
+ //
+ // If it is instruction fetch failure, get the correct IP from BTS.
+ //
+ InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
+ if (InstructionAddress == 0) {
+ //
+ // It indicates the instruction which caused the page fault is not a jump instruction;
+ // set the instruction address to the page fault address.
+ //
+ InstructionAddress = PFAddress;
+ }
+ }
+
+ //
+ // Try to find which CPU triggered the software SMI (SWSMI)
+ //
+ SwSmiCpuIndex = 0;
+ //
+ // Indicate it is not software SMI
+ //
+ SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
+ if (EFI_ERROR (Status)) {
+ continue;
+ }
+ if (IoInfo.IoPort == mSmiCommandPort) {
+ //
+ // Found the CPU that triggered the software SMI.
+ //
+ SwSmiCpuIndex = Index;
+ //
+ // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
+ //
+ SoftSmiValue = IoRead8 (mSmiCommandPort);
+ SmiCommand = (UINT64)SoftSmiValue;
+ break;
+ }
+ }
+
+ SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
+ //
+ // Check if there is already a same entry in profile data.
+ //
+ for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
+ if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
+ (SmmProfileEntry[Index].Address == PFAddress) &&
+ (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
+ (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
+ (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {
+ //
+ // The same record already exists; no need to save it again.
+ //
+ break;
+ }
+ }
+ if (Index == mSmmProfileBase->CurDataEntries) {
+ CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
+ MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;
+ if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
+ CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
+ }
+ if (CurrentEntryNumber < MaxEntryNumber) {
+ //
+ // Log the new entry
+ //
+ SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
+ SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
+ SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
+ SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
+ SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
+ SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
+ SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
+ //
+ // Update current entry index and data size in the header.
+ //
+ mSmmProfileBase->CurDataEntries++;
+ mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
+ }
+ }
+ }
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+
+ if (mBtsSupported) {
+ EnableBTS ();
+ }
+}
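+
+//
+// Ring-buffer note (illustrative, based on the logic above): when
+// PcdCpuSmmProfileRingBuffer is TRUE the entry index wraps, so with
+// MaxDataEntries = 4 the fifth new record is written to slot 4 % 4 = 0, while
+// CurDataEntries keeps growing as a running total of logged records.
+//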
+
+/**
+ Replace the INT1 exception handler to restore the page table to the absent/execute-disable
+ state, so that the page fault is triggered again and SMM profile data can be saved.
+
+**/
+VOID
+InitIdtr (
+ VOID
+ )
+{
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
+}
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.h
new file mode 100644
index 0000000000..0c5411b7ba
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfile.h
@@ -0,0 +1,76 @@
+/** @file
+ SMM profile header file.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_PROFILE_H_
+#define _SMM_PROFILE_H_
+
+//
+// External functions
+//
+
+/**
+ Initialize processor environment for SMM profile.
+
+ @param CpuIndex The index of the processor.
+
+**/
+VOID
+ActivateSmmProfile (
+ IN UINTN CpuIndex
+ );
+
+/**
+ Initialize SMM profile in SMM CPU entrypoint.
+
+**/
+VOID
+InitSmmProfile (
+ VOID
+ );
+
+/**
+ Increase the SMI count on each SMI entry into SMM.
+
+**/
+VOID
+SmmProfileRecordSmiNum (
+ VOID
+ );
+
+/**
+ The Page fault handler to save SMM profile data.
+
+ @param Rip The RIP when exception happens.
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+SmmProfilePFHandler (
+ UINTN Rip,
+ UINTN ErrorCode
+ );
+
+/**
+ Updates the page table to make some memory ranges (like system memory) absent
+ and make some memory ranges (like MMIO) present and execute-disable. It also
+ splits 2MB pages into 4KB pages for some memory ranges.
+
+**/
+VOID
+SmmProfileStart (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_H_
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfileInternal.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfileInternal.h
new file mode 100644
index 0000000000..7403c4fc46
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SmmProfileInternal.h
@@ -0,0 +1,190 @@
+/** @file
+ SMM profile internal header file.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_PROFILE_INTERNAL_H_
+#define _SMM_PROFILE_INTERNAL_H_
+
+#include <Guid/GlobalVariable.h>
+#include <Guid/Acpi.h>
+#include <Protocol/SmmReadyToLock.h>
+#include <Library/UefiRuntimeServicesTableLib.h>
+#include <Library/DxeServicesTableLib.h>
+#include <Library/CpuLib.h>
+#include <IndustryStandard/Acpi.h>
+
+#include "SmmProfileArch.h"
+
+//
+// Configure the SMM_PROFILE DTS region size
+//
+#define SMM_PROFILE_DTS_SIZE (4 * 1024 * 1024) // 4M
+
+#define MAX_PF_PAGE_COUNT 0x2
+
+#define PEBS_RECORD_NUMBER 0x2
+
+#define MAX_PF_ENTRY_COUNT 10
+
+//
+// This macro just enables unit test for the profile.
+// Please keep it disabled.
+//
+
+#define IA32_PF_EC_P (1u << 0)
+#define IA32_PF_EC_WR (1u << 1)
+#define IA32_PF_EC_US (1u << 2)
+#define IA32_PF_EC_RSVD (1u << 3)
+#define IA32_PF_EC_ID (1u << 4)
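+
+//
+// Decoding sketch (not part of the original header; the value is hypothetical):
+// an error code of 0x11 is IA32_PF_EC_P | IA32_PF_EC_ID, i.e. an instruction
+// fetch from a present page, such as an NX (execute-disable) violation.
+//
+//   UINTN ErrorCode = 0x11;                                // hypothetical value
+//   BOOLEAN Fetch   = ((ErrorCode & IA32_PF_EC_ID) != 0);  // TRUE
+//   BOOLEAN Present = ((ErrorCode & IA32_PF_EC_P)  != 0);  // TRUE
+//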
+
+#define SMM_PROFILE_NAME L"SmmProfileData"
+
+//
+// CPU generic definition
+//
+#define IA32_PG_NX (1ll << 63)
+
+#define IA32_CPUID_SS 0x08000000
+
+#define CPUID1_EDX_XD_SUPPORT 0x100000
+#define MSR_EFER 0xc0000080
+#define MSR_EFER_XD 0x800
+
+#define CPUID1_EDX_BTS_AVAILABLE 0x200000
+
+#define DR6_SINGLE_STEP 0x4000
+#define RFLAG_TF 0x100
+
+#define MSR_DEBUG_CTL 0x1D9
+#define MSR_DEBUG_CTL_LBR 0x1
+#define MSR_DEBUG_CTL_TR 0x40
+#define MSR_DEBUG_CTL_BTS 0x80
+#define MSR_DEBUG_CTL_BTINT 0x100
+#define MSR_LASTBRANCH_TOS 0x1C9
+#define MSR_LER_FROM_LIP 0x1DD
+#define MSR_LER_TO_LIP 0x1DE
+#define MSR_DS_AREA 0x600
+
+typedef struct {
+ EFI_PHYSICAL_ADDRESS Base;
+ EFI_PHYSICAL_ADDRESS Top;
+} MEMORY_RANGE;
+
+typedef struct {
+ MEMORY_RANGE Range;
+ BOOLEAN Present;
+ BOOLEAN Nx;
+} MEMORY_PROTECTION_RANGE;
+
+typedef struct {
+ UINT64 HeaderSize;
+ UINT64 MaxDataEntries;
+ UINT64 MaxDataSize;
+ UINT64 CurDataEntries;
+ UINT64 CurDataSize;
+ UINT64 TsegStart;
+ UINT64 TsegSize;
+ UINT64 NumSmis;
+ UINT64 NumCpus;
+} SMM_PROFILE_HEADER;
+
+typedef struct {
+ UINT64 SmiNum;
+ UINT64 CpuNum;
+ UINT64 ApicId;
+ UINT64 ErrorCode;
+ UINT64 Instruction;
+ UINT64 Address;
+ UINT64 SmiCmd;
+} SMM_PROFILE_ENTRY;
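+
+//
+// Buffer layout sketch (inferred from SmmProfile.c, not part of the original
+// header): the profile buffer begins with one SMM_PROFILE_HEADER immediately
+// followed by the SMM_PROFILE_ENTRY records, so entry N can be addressed as:
+//
+//   SMM_PROFILE_ENTRY *Entry = (SMM_PROFILE_ENTRY *)(mSmmProfileBase + 1) + N;
+//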
+
+extern SMM_S3_RESUME_STATE *mSmmS3ResumeState;
+extern UINTN gSmiExceptionHandlers[];
+extern BOOLEAN mXdSupported;
+extern UINTN mPFEntryCount[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)];
+extern UINT64 mLastPFEntryValue[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)][MAX_PF_ENTRY_COUNT];
+extern UINT64 *mLastPFEntryPointer[FixedPcdGet32 (PcdCpuMaxLogicalProcessorNumber)][MAX_PF_ENTRY_COUNT];
+
+//
+// Internal functions
+//
+
+/**
+ Update IDT table to replace page fault handler and INT 1 handler.
+
+**/
+VOID
+InitIdtr (
+ VOID
+ );
+
+/**
+ Check if the memory address will be mapped by 4KB-page.
+
+ @param Address The address of Memory.
+
+**/
+BOOLEAN
+IsAddressSplit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ );
+
+/**
+ Check if the memory address should be mapped as present, and return whether it is execute-disable.
+
+ @param Address The address of Memory.
+ @param Nx The flag indicates if the memory is execute-disable.
+
+**/
+BOOLEAN
+IsAddressValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN *Nx
+ );
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+EFIAPI
+SmiDefaultPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_INTERNAL_H_
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SyncTimer.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SyncTimer.c
new file mode 100644
index 0000000000..a2c2f1e75b
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/SyncTimer.c
@@ -0,0 +1,111 @@
+/** @file
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINT64 mTimeoutTicker = 0;
+//
+// Number of counts in a roll-over cycle of the performance counter.
+//
+UINT64 mCycle = 0;
+//
+// Flag to indicate the performance counter is count-up or count-down.
+//
+BOOLEAN mCountDown;
+
+/**
+ Initialize Timer for Smm AP Sync.
+
+**/
+VOID
+InitializeSmmTimer (
+ VOID
+ )
+{
+ UINT64 TimerFrequency;
+ UINT64 Start;
+ UINT64 End;
+
+ TimerFrequency = GetPerformanceCounterProperties (&Start, &End);
+ mTimeoutTicker = DivU64x32 (
+ MultU64x64(TimerFrequency, PcdGet64 (PcdCpuSmmApSyncTimeout)),
+ 1000 * 1000
+ );
+ if (End < Start) {
+ mCountDown = TRUE;
+ mCycle = Start - End;
+ } else {
+ mCountDown = FALSE;
+ mCycle = End - Start;
+ }
+}
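+
+//
+// Worked example (illustrative values): with a performance counter frequency of
+// 24,000,000 Hz and PcdCpuSmmApSyncTimeout set to 1,000 microseconds, the code
+// above yields mTimeoutTicker = (24000000 * 1000) / (1000 * 1000) = 24,000 ticks.
+//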
+
+/**
+ Start Timer for Smm AP Sync.
+
+**/
+UINT64
+EFIAPI
+StartSyncTimer (
+ VOID
+ )
+{
+ return GetPerformanceCounter ();
+}
+
+/**
+ Check if the SMM AP sync timer has timed out.
+
+ @param Timer The timer value returned by StartSyncTimer () at the beginning.
+
+**/
+BOOLEAN
+EFIAPI
+IsSyncTimerTimeout (
+ IN UINT64 Timer
+ )
+{
+ UINT64 CurrentTimer;
+ UINT64 Delta;
+
+ CurrentTimer = GetPerformanceCounter ();
+
+ if (mCountDown) {
+ //
+ // The performance counter counts down. Check for roll over condition.
+ //
+ if (CurrentTimer < Timer) {
+ Delta = Timer - CurrentTimer;
+ } else {
+ //
+ // Handle one roll-over.
+ //
+ Delta = mCycle - (CurrentTimer - Timer);
+ }
+ } else {
+ //
+ // The performance counter counts up. Check for roll over condition.
+ //
+ if (CurrentTimer > Timer) {
+ Delta = CurrentTimer - Timer;
+ } else {
+ //
+ // Handle one roll-over.
+ //
+ Delta = mCycle - (Timer - CurrentTimer);
+ }
+ }
+
+ return (BOOLEAN) (Delta >= mTimeoutTicker);
+}
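+
+//
+// Roll-over example (illustrative, assuming a count-up counter with mCycle = 1000):
+// if Timer = 990 at the start and the counter has since wrapped to
+// CurrentTimer = 30, then CurrentTimer <= Timer, so
+// Delta = mCycle - (Timer - CurrentTimer) = 1000 - 960 = 40 elapsed ticks.
+//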
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.S
new file mode 100644
index 0000000000..c311568a17
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.S
@@ -0,0 +1,198 @@
+## @file
+# This is the assembly code for Multi-processor S3 support
+#
+# Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+.equ VacantFlag, 0x0
+.equ NotVacantFlag, 0xff
+
+.equ LockLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+.equ StackStartAddressLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x08
+.equ StackSizeLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x10
+.equ CProcedureLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x18
+.equ GdtrLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x20
+.equ IdtrLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x2A
+.equ BufferStartLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x34
+.equ Cr3OffsetLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x38
+
+#-------------------------------------------------------------------------------------
+#RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+#procedure serializes all the AP processors through an Init sequence. It must be
+#noted that APs arrive here very raw, i.e. in real mode with no stack.
+#ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
+#IS IN MACHINE CODE.
+#-------------------------------------------------------------------------------------
+#RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+.code:
+
+ASM_GLOBAL ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+# At this point CS = 0x(vv00) and ip= 0x0.
+
+ .byte 0x8c,0xc8 # mov ax, cs
+ .byte 0x8e,0xd8 # mov ds, ax
+ .byte 0x8e,0xc0 # mov es, ax
+ .byte 0x8e,0xd0 # mov ss, ax
+ .byte 0x33,0xc0 # xor ax, ax
+ .byte 0x8e,0xe0 # mov fs, ax
+ .byte 0x8e,0xe8 # mov gs, ax
+
+flat32Start:
+
+ .byte 0xBE
+ .word BufferStartLocation
+ .byte 0x66,0x8B,0x14 # mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
+
+ .byte 0xBE
+ .word Cr3OffsetLocation
+ .byte 0x66,0x8B,0xC # mov ecx,dword ptr [si] ; ECX is keeping the value of CR3
+
+ .byte 0xBE
+ .word GdtrLocation
+ .byte 0x66 # db 66h
+ .byte 0x2E,0xF,0x1,0x14 # lgdt fword ptr cs:[si]
+
+ .byte 0xBE
+ .word IdtrLocation
+ .byte 0x66 # db 66h
+ .byte 0x2E,0xF,0x1,0x1C # lidt fword ptr cs:[si]
+
+ .byte 0x33,0xC0 # xor ax, ax
+ .byte 0x8E,0xD8 # mov ds, ax
+
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Get control register 0
+ .byte 0x66,0x83,0xC8,0x1 # or eax, 000000001h ; Set PE bit (bit #0)
+ .byte 0xF,0x22,0xC0 # mov cr0, eax
+
+FLAT32_JUMP:
+
+ .byte 0x66,0x67,0xEA # far jump
+ .long 0x0 # 32-bit offset
+ .word 0x20 # 16-bit selector
+
+NemInit: # protected mode entry point
+
+ .byte 0x66,0xB8,0x18,0x0 # mov ax, 18h
+ .byte 0x66,0x8E,0xD8 # mov ds, ax
+ .byte 0x66,0x8E,0xC0 # mov es, ax
+ .byte 0x66,0x8E,0xE0 # mov fs, ax
+ .byte 0x66,0x8E,0xE8 # mov gs, ax
+ .byte 0x66,0x8E,0xD0 # mov ss, ax ; Flat mode setup.
+
+ .byte 0xF,0x20,0xE0 # mov eax, cr4
+ .byte 0xF,0xBA,0xE8,0x5 # bts eax, 5
+ .byte 0xF,0x22,0xE0 # mov cr4, eax
+
+ .byte 0xF,0x22,0xD9 # mov cr3, ecx
+
+ .byte 0x8B,0xF2 # mov esi, edx ; Save wakeup buffer address
+
+ .byte 0xB9
+ .long 0xC0000080 # mov ecx, 0c0000080h ; EFER MSR number.
+ .byte 0xF,0x32 # rdmsr ; Read EFER.
+ .byte 0xF,0xBA,0xE8,0x8 # bts eax, 8 ; Set LME=1.
+ .byte 0xF,0x30 # wrmsr ; Write EFER.
+
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Read CR0.
+ .byte 0xF,0xBA,0xE8,0x1F # bts eax, 31 ; Set PG=1.
+ .byte 0xF,0x22,0xC0 # mov cr0, eax ; Write CR0.
+
+LONG_JUMP:
+
+ .byte 0x67,0xEA # far jump
+ .long 0x0 # 32-bit offset
+ .word 0x38 # 16-bit selector
+
+LongModeStart:
+
+ movw $0x30,%ax
+ .byte 0x66
+ movw %ax,%ds
+ .byte 0x66
+ movw %ax,%es
+ .byte 0x66
+ movw %ax,%ss
+
+ movl %esi,%edi
+ addl $LockLocation, %edi
+ movb $NotVacantFlag, %al
+TestLock:
+ xchgb (%edi), %al
+ cmpb $NotVacantFlag, %al
+ jz TestLock
+
+ProgramStack:
+
+ movl %esi,%edi
+ addl $StackSizeLocation, %edi
+ movq (%edi), %rax
+ movl %esi,%edi
+ addl $StackStartAddressLocation, %edi
+ addq (%edi), %rax
+ movq %rax, %rsp
+ movq %rax, (%edi)
+
+Releaselock:
+
+ movb $VacantFlag, %al
+ movl %esi,%edi
+ addl $LockLocation, %edi
+ xchgb (%edi), %al
+
+ #
+ # Call assembly function to initialize FPU.
+ #
+ movabsq $ASM_PFX(InitializeFloatingPointUnits), %rax
+ subq $0x20, %rsp
+ call *%rax
+ addq $0x20, %rsp
+ #
+ # Call C Function
+ #
+ movl %esi,%edi
+ addl $CProcedureLocation, %edi
+ movq (%edi), %rax
+
+ testq %rax, %rax
+ jz GoToSleep
+
+ subq $0x20, %rsp
+ call *%rax
+ addq $0x20, %rsp
+
+GoToSleep:
+ cli
+ hlt
+ jmp .-2
+
+RendezvousFunnelProcEnd:
+
+
+#-------------------------------------------------------------------------------------
+# AsmGetAddressMap (&AddressMap);
+#-------------------------------------------------------------------------------------
+# Address map filled in by AsmGetAddressMap (offsets into the output buffer):
+#   0x00  address of RendezvousFunnelProcStart
+#   0x08  offset of NemInit (protected-mode entry)
+#   0x10  offset of FLAT32_JUMP
+#   0x18  size of RendezvousFunnelProc
+#   0x20  offset of LongModeStart
+#   0x28  offset of LONG_JUMP
+ASM_GLOBAL ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+ movabsq $RendezvousFunnelProcStart, %rax
+ movq %rax, (%rcx)
+ movq $(NemInit - RendezvousFunnelProcStart), 0x08(%rcx)
+ movq $(FLAT32_JUMP - RendezvousFunnelProcStart), 0x10(%rcx)
+ movq $(RendezvousFunnelProcEnd - RendezvousFunnelProcStart), 0x18(%rcx)
+ movq $(LongModeStart - RendezvousFunnelProcStart), 0x20(%rcx)
+ movq $(LONG_JUMP - RendezvousFunnelProcStart), 0x28(%rcx)
+ ret
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.asm
new file mode 100644
index 0000000000..f0a0bc3f89
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/MpFuncs.asm
@@ -0,0 +1,201 @@
+;; @file
+; This is the assembly code for Multi-processor S3 support
+;
+; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+EXTERN InitializeFloatingPointUnits:PROC
+
+VacantFlag Equ 00h
+NotVacantFlag Equ 0ffh
+
+LockLocation equ RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+StackStartAddressLocation equ LockLocation + 08h
+StackSizeLocation equ LockLocation + 10h
+CProcedureLocation equ LockLocation + 18h
+GdtrLocation equ LockLocation + 20h
+IdtrLocation equ LockLocation + 2Ah
+BufferStartLocation equ LockLocation + 34h
+Cr3OffsetLocation equ LockLocation + 38h
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+;procedure serializes all the AP processors through an Init sequence. It must be
;noted that APs arrive here very raw, i.e. in real mode with no stack.
+;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
+;IS IN MACHINE CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+;text SEGMENT
+.code
+
+RendezvousFunnelProc PROC
+RendezvousFunnelProcStart::
+
+; At this point CS = 0x(vv00) and ip= 0x0.
+
+ db 8ch, 0c8h ; mov ax, cs
+ db 8eh, 0d8h ; mov ds, ax
+ db 8eh, 0c0h ; mov es, ax
+ db 8eh, 0d0h ; mov ss, ax
+ db 33h, 0c0h ; xor ax, ax
+ db 8eh, 0e0h ; mov fs, ax
+ db 8eh, 0e8h ; mov gs, ax
+
+flat32Start::
+
+ db 0BEh
+ dw BufferStartLocation ; mov si, BufferStartLocation
+ db 66h, 8Bh, 14h ; mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
+
+ db 0BEh
+ dw Cr3OffsetLocation ; mov si, Cr3Location
+ db 66h, 8Bh, 0Ch ; mov ecx,dword ptr [si] ; ECX is keeping the value of CR3
+
+ db 0BEh
+ dw GdtrLocation ; mov si, GdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 14h ; lgdt fword ptr cs:[si]
+
+ db 0BEh
+ dw IdtrLocation ; mov si, IdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 1Ch ; lidt fword ptr cs:[si]
+
+ db 33h, 0C0h ; xor ax, ax
+ db 8Eh, 0D8h ; mov ds, ax
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0
+ db 66h, 83h, 0C8h, 01h ; or eax, 000000001h ; Set PE bit (bit #0)
+ db 0Fh, 22h, 0C0h ; mov cr0, eax
+
+FLAT32_JUMP::
+
+ db 66h, 67h, 0EAh ; far jump
+ dd 0h ; 32-bit offset
+ dw 20h ; 16-bit selector
+
+NemInit:: ; protected mode entry point
+
+ db 66h, 0B8h, 18h, 00h ; mov ax, 18h
+ db 66h, 8Eh, 0D8h ; mov ds, ax
+ db 66h, 8Eh, 0C0h ; mov es, ax
+ db 66h, 8Eh, 0E0h ; mov fs, ax
+ db 66h, 8Eh, 0E8h ; mov gs, ax
+ db 66h, 8Eh, 0D0h ; mov ss, ax ; Flat mode setup.
+
+ db 0Fh, 20h, 0E0h ; mov eax, cr4
+ db 0Fh, 0BAh, 0E8h, 05h ; bts eax, 5
+ db 0Fh, 22h, 0E0h ; mov cr4, eax
+
+ db 0Fh, 22h, 0D9h ; mov cr3, ecx
+
+ db 8Bh, 0F2h ; mov esi, edx ; Save wakeup buffer address
+
+ db 0B9h
+ dd 0C0000080h ; mov ecx, 0c0000080h ; EFER MSR number.
+ db 0Fh, 32h ; rdmsr ; Read EFER.
+ db 0Fh, 0BAh, 0E8h, 08h ; bts eax, 8 ; Set LME=1.
+ db 0Fh, 30h ; wrmsr ; Write EFER.
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Read CR0.
+ db 0Fh, 0BAh, 0E8h, 1Fh ; bts eax, 31 ; Set PG=1.
+ db 0Fh, 22h, 0C0h ; mov cr0, eax ; Write CR0.
+
+LONG_JUMP::
+
+ db 67h, 0EAh ; far jump
+ dd 0h ; 32-bit offset
+ dw 38h ; 16-bit selector
+
+LongModeStart::
+
+ mov ax, 30h
+ mov ds, ax
+ mov es, ax
+ mov ss, ax
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock::
+ xchg byte ptr [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack::
+
+ mov edi, esi
+ add edi, StackSizeLocation
+ mov rax, qword ptr [edi]
+ mov edi, esi
+ add edi, StackStartAddressLocation
+ add rax, qword ptr [edi]
+ mov rsp, rax
+ mov qword ptr [edi], rax
+
+Releaselock::
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte ptr [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov rax, InitializeFloatingPointUnits
+ sub rsp, 20h
+ call rax
+ add rsp, 20h
+
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, CProcedureLocation
+ mov rax, qword ptr [edi]
+
+ test rax, rax
+ jz GoToSleep
+
+ sub rsp, 20h
+ call rax
+ add rsp, 20h
+
+GoToSleep::
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProcEnd::
+RendezvousFunnelProc ENDP
+
+
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+; Address map filled in by AsmGetAddressMap (offsets into the output buffer):
+;   0x00  address of RendezvousFunnelProcStart
+;   0x08  offset of NemInit (protected-mode entry)
+;   0x10  offset of FLAT32_JUMP
+;   0x18  size of RendezvousFunnelProc
+;   0x20  offset of LongModeStart
+;   0x28  offset of LONG_JUMP
+AsmGetAddressMap PROC
+ mov rax, offset RendezvousFunnelProcStart
+ mov qword ptr [rcx], rax
+ mov qword ptr [rcx+8h], NemInit - RendezvousFunnelProcStart
+ mov qword ptr [rcx+10h], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov qword ptr [rcx+18h], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+ mov qword ptr [rcx+20h], LongModeStart - RendezvousFunnelProcStart
+ mov qword ptr [rcx+28h], LONG_JUMP - RendezvousFunnelProcStart
+ ret
+
+AsmGetAddressMap ENDP
+
+END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/PageTbl.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/PageTbl.c
new file mode 100644
index 0000000000..11f2660a6d
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -0,0 +1,647 @@
+/** @file
+ Page Fault (#PF) handler for X64 processors
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+#define PAGE_TABLE_PAGES 8
+#define IA32_PG_PMNT (1ull << 62)
+#define ACC_MAX_BIT BIT3
+LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
+SPIN_LOCK mPFLock;
+BOOLEAN m1GPageTableSupport = FALSE;
+
+/**
+ Check whether 1-GByte pages are supported by the processor or not.
+
+ @retval TRUE 1-GByte pages are supported.
+ @retval FALSE 1-GByte pages are not supported.
+
+**/
+BOOLEAN
+Is1GPageSupport (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT32 RegEdx;
+ UINT64 mPhyMask;
+
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+ mPhyMask = LShiftU64 (1, (UINT8)RegEax) - 1;
+ mPhyMask &= (1ull << 48) - SIZE_4KB;
+
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000001) {
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & BIT26) != 0) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/**
+ Set sub-entries number in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] SubEntryNum Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there are 512 sub-entries under this entry
+
+**/
+VOID
+SetSubEntriesNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 SubEntryNum
+ )
+{
+ //
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
+ //
+ *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
+}
+
+/**
+ Return sub-entries number in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there are 512 sub-entries under this entry
+**/
+UINT64
+GetSubEntriesNum (
+ IN UINT64 *Entry
+ )
+{
+ //
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
+ //
+ return BitFieldRead64 (*Entry, 52, 60);
+}
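+
+//
+// Usage sketch (not part of the original source): the reserved bits let an
+// upper-level entry track how many of its 512 slots are populated without any
+// side table, e.g. when a new child table is hooked under an entry:
+//
+//   SetSubEntriesNum (Entry, GetSubEntriesNum (Entry) + 1);
+//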
+
+/**
+ Create PageTable for SMM use.
+
+ @return The address of PML4 (to set CR3).
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+ LIST_ENTRY *FreePage;
+ UINTN Index;
+
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (&mPFLock);
+
+ m1GPageTableSupport = Is1GPageSupport ();
+ //
+ // Generate PAE page table for the first 4GB memory space
+ //
+ Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1);
+
+ //
+ // Set IA32_PG_PMNT bit to mask this entry
+ //
+ PTEntry = (UINT64*)(UINTN)Pages;
+ for (Index = 0; Index < 4; Index++) {
+ PTEntry[Index] |= IA32_PG_PMNT;
+ }
+
+ //
+ // Fill Page-Table-Level4 (PML4) entry
+ //
+ PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));
+ *PTEntry = Pages + IA32_PG_P;
+ ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
+ //
+ // Set sub-entries number
+ //
+ SetSubEntriesNum (PTEntry, 3);
+
+ //
+ // Add remaining pages to page pool
+ //
+ FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));
+ while ((UINTN)FreePage < Pages) {
+ InsertTailList (&mPagePool, FreePage);
+ FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
+ }
+
+ //
+ // Register Smm Page Fault Handler
+ //
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+
+ //
+ // Return the address of PML4 (to set CR3)
+ //
+ return (UINT32)(UINTN)PTEntry;
+}
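+
+//
+// Layout sketch (inferred from the code above): Gen4GPageTable returns the PAE
+// tables for the first 4GB; the page PAGE_TABLE_PAGES + 1 pages below that
+// address holds the PML4, and each remaining 4-KByte page in between is threaded
+// onto mPagePool for SmiDefaultPFHandler to allocate from on demand.
+//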
+
+/**
+ Set access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] Acc Access record value
+
+**/
+VOID
+SetAccNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 Acc
+ )
+{
+ //
+ // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
+ //
+ *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
+}
+
+/**
+ Return access record in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Access record value.
+
+**/
+UINT64
+GetAccNum (
+ IN UINT64 *Entry
+ )
+{
+ //
+ // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
+ //
+ return BitFieldRead64 (*Entry, 9, 11);
+}
+
+/**
+ Return and update the access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+
+ @return Access record value.
+
+**/
+UINT64
+GetAndUpdateAccNum (
+ IN OUT UINT64 *Entry
+ )
+{
+ UINT64 Acc;
+
+ Acc = GetAccNum (Entry);
+ if ((*Entry & IA32_PG_A) != 0) {
+ //
+ // If this entry has been accessed, clear the access flag in the entry and reset the
+ // access record to the initial value 7; adding ACC_MAX_BIT makes the returned value
+ // larger than that of any unaccessed entry.
+ //
+ *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
+ SetAccNum (Entry, 0x7);
+ return (0x7 + ACC_MAX_BIT);
+ } else {
+ if (Acc != 0) {
+ //
+ // If the access record is not the smallest value 0, decrement it and update the access record field
+ //
+ SetAccNum (Entry, Acc - 1);
+ }
+ }
+ return Acc;
+}
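+
+//
+// Worked example (illustrative): an entry with IA32_PG_A set returns
+// 7 + ACC_MAX_BIT = 15 and is reset to 7, while an unaccessed entry decays one
+// step per call (7 -> 6 -> ... -> 0); pages untouched for the most scans thus
+// report the smallest value and are reclaimed first by ReclaimPages().
+//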
+
+/**
+ Reclaim free pages for PageFault handler.
+
+ Search the whole page-table tree to find the leaf entry that has the smallest
+ access record value. Insert the page pointed to by this leaf entry into the
+ page pool, then check whether its upper-level entries also need to be inserted
+ into the page pool.
+
+**/
+VOID
+ReclaimPages (
+ VOID
+ )
+{
+ UINT64 *Pml4;
+ UINT64 *Pdpt;
+ UINT64 *Pdt;
+ UINTN Pml4Index;
+ UINTN PdptIndex;
+ UINTN PdtIndex;
+ UINTN MinPml4;
+ UINTN MinPdpt;
+ UINTN MinPdt;
+ UINT64 MinAcc;
+ UINT64 Acc;
+ UINT64 SubEntriesNum;
+ BOOLEAN PML4EIgnore;
+ BOOLEAN PDPTEIgnore;
+ UINT64 *ReleasePageAddress;
+
+ Pml4 = NULL;
+ Pdpt = NULL;
+ Pdt = NULL;
+ MinAcc = (UINT64)-1;
+ MinPml4 = (UINTN)-1;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ Acc = 0;
+ ReleasePageAddress = 0;
+
+ //
+ // First, find the leaf entry that has the smallest access record value
+ //
+ Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
+ for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PML4 entry is not present or is masked, skip it
+ //
+ continue;
+ }
+ Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
+ PML4EIgnore = FALSE;
+ for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
+ if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PDPT entry is not present or is masked, skip it
+ //
+ if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PDPT entry is masked, we will ignore checking the PML4 entry
+ //
+ PML4EIgnore = TRUE;
+ }
+ continue;
+ }
+ if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
+ //
+ // It's not a 1-GByte page entry; it is a PDPT entry referencing a page
+ // directory, so we will not check the PML4 entry any further
+ //
+ PML4EIgnore = TRUE;
+ Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
+ PDPTEIgnore = FALSE;
+ for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
+ if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PD entry is not present or is masked, skip it
+ //
+ if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PD entry is masked, we will not check the PDPT entry any more
+ //
+ PDPTEIgnore = TRUE;
+ }
+ continue;
+ }
+ if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
+ //
+ // It's not a 2-MByte page entry; it is a PD entry referencing a 4-KByte page
+ // table, so find the entry that has the smallest access record value
+ //
+ PDPTEIgnore = TRUE;
+ Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
+ if (Acc < MinAcc) {
+ //
+ // If the PD entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = PdtIndex;
+ ReleasePageAddress = Pdt + PdtIndex;
+ }
+ }
+ }
+ if (!PDPTEIgnore) {
+ //
+ // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
+ // it only has entries that point to 2-MByte pages
+ //
+ Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
+ if (Acc < MinAcc) {
+ //
+ // If the PDPT entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = (UINTN)-1;
+ ReleasePageAddress = Pdpt + PdptIndex;
+ }
+ }
+ }
+ }
+ if (!PML4EIgnore) {
+ //
+ // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
+ // it only has entries that point to 1-GByte pages
+ //
+ Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
+ if (Acc < MinAcc) {
+ //
+ // If the PML4 entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml4 = Pml4Index;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ ReleasePageAddress = Pml4 + Pml4Index;
+ }
+ }
+ }
+ //
+ // Make sure one PML4/PDPT/PD entry is selected
+ //
+ ASSERT (MinAcc != (UINT64)-1);
+
+ //
+ // Secondly, insert the page pointed by this entry into page pool and clear this entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
+ *ReleasePageAddress = 0;
+
+ //
+ // Lastly, check whether this entry's upper-level entries also need to be
+ // inserted into the page pool
+ //
+ while (TRUE) {
+ if (MinPdt != (UINTN)-1) {
+ //
+ // If 4 KByte Page Table is released, check the PDPT entry
+ //
+ Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
+ SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
+ if (SubEntriesNum == 0) {
+ //
+ // Release the empty page directory table when no 4-KByte page table entries remain
+ // under it, and clear the corresponding PDPT entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
+ Pdpt[MinPdpt] = 0;
+ //
+ // Go on checking the PML4 table
+ //
+ MinPdt = (UINTN)-1;
+ continue;
+ }
+ //
+ // Update the sub-entries field in the PDPT entry and exit
+ //
+ SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
+ break;
+ }
+ if (MinPdpt != (UINTN)-1) {
+ //
+ // A 2-MByte page table or a page directory table was released; check the PML4 entry
+ //
+ SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
+ if (SubEntriesNum == 0) {
+ //
+ // Release the empty PDPT table when no page directory or 1-GByte page entries remain
+ // under it, and clear the corresponding PML4 entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
+ Pml4[MinPml4] = 0;
+ MinPdpt = (UINTN)-1;
+ continue;
+ }
+ //
+ // Update the sub-entries field in the PML4 entry and exit
+ //
+ SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
+ break;
+ }
+ //
+ // The page pointed to by a PML4 entry was released above; exit the loop
+ //
+ break;
+ }
+}
+
+/**
+ Allocate free Page for PageFault handler use.
+
+ @return Page address.
+
+**/
+UINT64
+AllocPage (
+ VOID
+ )
+{
+ UINT64 RetVal;
+
+ if (IsListEmpty (&mPagePool)) {
+ //
+ // If page pool is empty, reclaim the used pages and insert one into page pool
+ //
+ ReclaimPages ();
+ }
+
+ //
+ // Get one free page and remove it from page pool
+ //
+ RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
+ RemoveEntryList (mPagePool.ForwardLink);
+ //
+ // Clean this page and return
+ //
+ ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
+ return RetVal;
+}
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ UINT64 *PageTable;
+ UINT64 *Pml4;
+ UINT64 PFAddress;
+ UINTN StartBit;
+ UINTN EndBit;
+ UINT64 PTIndex;
+ UINTN Index;
+ SMM_PAGE_SIZE_TYPE PageSize;
+ UINTN NumOfPages;
+ UINTN PageAttribute;
+ EFI_STATUS Status;
+ UINT64 *UpperEntry;
+
+ EndBit = 0;
+ Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
+ PFAddress = AsmReadCr2 ();
+
+ Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
+ //
+ // If the platform does not support page table attributes, set the default SMM page attributes
+ //
+ if (Status != EFI_SUCCESS) {
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+ }
+ if (PageSize >= MaxSmmPageSizeType) {
+ PageSize = SmmPageSize2M;
+ }
+ if (NumOfPages > 512) {
+ NumOfPages = 512;
+ }
+
+ switch (PageSize) {
+ case SmmPageSize4K:
+ //
+ // BIT12 to BIT20 is Page Table index
+ //
+ EndBit = 12;
+ break;
+ case SmmPageSize2M:
+ //
+ // BIT21 to BIT29 is Page Directory index
+ //
+ EndBit = 21;
+ PageAttribute |= IA32_PG_PS;
+ break;
+ case SmmPageSize1G:
+ if (!m1GPageTableSupport) {
+ DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!"));
+ ASSERT (FALSE);
+ }
+ //
+ // BIT30 to BIT38 is Page Directory Pointer Table index
+ //
+ EndBit = 30;
+ PageAttribute |= IA32_PG_PS;
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+
+ for (Index = 0; Index < NumOfPages; Index++) {
+ PageTable = Pml4;
+ UpperEntry = NULL;
+ for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
+ //
+ // If the entry is not present, allocate one page from page pool for it
+ //
+ PageTable[PTIndex] = AllocPage () | IA32_PG_RW | IA32_PG_P;
+ } else {
+ //
+ // Save the upper entry address
+ //
+ UpperEntry = PageTable + PTIndex;
+ }
+ //
+ // BIT9 to BIT11 of the entry are used to save the access record;
+ // the initial value is 7
+ //
+ PageTable[PTIndex] |= IA32_PG_A;
+ SetAccNum (PageTable + PTIndex, 7);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
+ }
+
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ //
+ // Check if the entry already exists; this issue may occur when page entries of
+ // different sizes are created under the same upper-level entry
+ //
+ DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
+ DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));
+ ASSERT (FALSE);
+ }
+ //
+ // Fill the new entry
+ //
+ PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
+ PageAttribute | IA32_PG_A | IA32_PG_RW | IA32_PG_P;
+ if (UpperEntry != NULL) {
+ SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
+ }
+ //
+ // Get the next page address if we need to create more page tables
+ //
+ PFAddress += (1ull << EndBit);
+ }
+}
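+
+//
+// Worked example (illustrative): for a 2-MByte mapping, EndBit = 21, so the loop
+// above iterates with StartBit = 39 (PML4 level) and StartBit = 30 (PDPT level),
+// allocating any missing intermediate tables, and the final entry is then written
+// at the PD level using BitFieldRead64 (PFAddress, 21, 29) as the index.
+//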
+
+/**
+ The Page Fault handler wrapper for SMM use.
+
+ @param InterruptType Defines the type of interrupt or exception that
+ occurred on the processor. This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (&mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ //
+ // If a page fault occurs in the SMRAM range, it should be in an SMM stack guard page.
+ //
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
+ CpuDeadLoop ();
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextX64->Rip,
+ SystemContext.SystemContextX64->ExceptionData
+ );
+ } else {
+ SmiDefaultPFHandler ();
+ }
+
+ ReleaseSpinLock (&mPFLock);
+}
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/Semaphore.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/Semaphore.c
new file mode 100644
index 0000000000..534fe480b0
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/Semaphore.c
@@ -0,0 +1,104 @@
+/** @file
+ Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+ after SMBASE relocation.
+
+ Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+#define LMA BIT10
+
+UINTN mSmmRelocationOriginalAddress;
+BOOLEAN *mRebasedFlag;
+extern UINT32 mSmmRelocationOriginalAddressPtr32;
+extern UINT32 mRebasedFlagAddr32;
+
+/**
+ AP semaphore operation in 32-bit mode while the BSP runs in 64-bit mode.
+**/
+VOID
+SmmRelocationSemaphoreComplete32 (
+ VOID
+ );
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex
+ )
+{
+ UINTN FunctionPointer;
+ UINT64 Efer;
+ UINT16 AutoHaltRestart;
+ SOCKET_LGA_775_SMM_CPU_STATE *CpuState;
+ UINTN TempValue;
+
+ mRebasedFlag = (BOOLEAN *) &mRebased[CpuIndex];
+
+ //
+ // The BSP may run in 64-bit mode executing the boot script while the APs rendezvous
+ // in 32-bit mode; in that case the semaphore code must be 32-bit code instead of x64 code.
+ //
+ // Note that this module is loaded below 4G, so truncation of 64-bit address to 32-bit for 32-bit semaphore
+ // code is safe.
+ //
+ CpuState = NULL;
+
+ {
+ CpuState = (SOCKET_LGA_775_SMM_CPU_STATE *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_CPU_STATE_OFFSET);
+
+ //
+ // We are now in 64-bit mode, so SMM Save State Map must be x64 format.
+ //
+
+ mSmmRelocationOriginalAddress = CpuState->x64._RIP;
+ Efer = CpuState->x64.IA32_EFER;
+ AutoHaltRestart = CpuState->x86.AutoHALTRestart;
+ }
+
+ if ((Efer & LMA) == 0) {
+ //
+ // Use a temp value to fix an ICC compiler warning
+ //
+ TempValue = (UINTN)&mSmmRelocationOriginalAddress;
+ mSmmRelocationOriginalAddressPtr32 = (UINT32)TempValue;
+ mRebasedFlagAddr32 = (UINT32)(UINTN)mRebasedFlag;
+
+ FunctionPointer = (UINTN)&SmmRelocationSemaphoreComplete32;
+ } else {
+ FunctionPointer = (UINTN)&SmmRelocationSemaphoreComplete;
+ }
+
+ {
+ CpuState->x64._RIP = FunctionPointer;
+ }
+
+ if ((AutoHaltRestart & BIT0) != 0) {
+ //
+ // Clear the auto HALT restart flag so the RSM instruction returns
+ // program control to the instruction following the HLT instruction,
+ // actually returns to SmmRelocationSemaphoreComplete
+ //
+ {
+ CpuState->x86.AutoHALTRestart &= ~BIT0;
+ }
+ }
+}
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.S
new file mode 100644
index 0000000000..9648594e78
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.S
@@ -0,0 +1,236 @@
+## @file
+# Code template of the SMI handler for a particular processor
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+
+ASM_GLOBAL ASM_PFX(gcSmiHandlerTemplate)
+ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
+ASM_GLOBAL ASM_PFX(gSmiCr3)
+ASM_GLOBAL ASM_PFX(gcSmiHandlerOffset)
+ASM_GLOBAL ASM_PFX(gSmiStack)
+ASM_GLOBAL ASM_PFX(gSmbase)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+
+#
+# Constants relating to PROCESSOR_SMM_DESCRIPTOR
+#
+.equ DSC_OFFSET, 0xfb00
+.equ DSC_GDTPTR, 0x30
+.equ DSC_GDTSIZ, 0x38
+.equ DSC_CS, 14
+.equ DSC_DS, 16
+.equ DSC_SS, 18
+.equ DSC_OTHERSEG, 20
+.equ MSR_DR6, 0x0c05
+.equ MSR_DR7, 0x0c06
+
+.equ PROTECT_MODE_CS, 0x10
+.equ PROTECT_MODE_DS, 0x18
+.equ TSS_SEGMENT, 0x40
+.equ GDT_SIZE, 0x50
+
+#
+# Constants relating to CPU State Save Area
+#
+.equ SSM_DR6, 0xffd0
+.equ SSM_DR7, 0xffc8
+
+
+ .data
+
+ ASM_PFX(gcSmiHandlerOffset): .word _SmiHandler - _SmiEntryPoint + 0x8000
+
+ .text
+
+ASM_PFX(gcSmiHandlerTemplate):
+
+_SmiEntryPoint:
+ #
+ # The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
+ # bit addressing mode. And that coincidence has been used in the following
+ # "64-bit like" 16-bit code. Be aware that once RDI is referrenced as a
+ # base address register, it is actually BX that is referrenced.
+ #
+ .byte 0xbb # mov bx, imm16
+ .word _GdtDesc - _SmiEntryPoint + 0x8000
+ #
+ # fix GDT TSS table
+ #
+ .byte 0x66, 0x2e, 0xa1 # mov eax, cs:[offset16] ; eax = GDT base
+ .word DSC_OFFSET + DSC_GDTPTR
+ movw %ax, %dx
+ movw %ax, %bp # ebp = GDT base
+ addw $GDT_SIZE, %dx # edx = TSS descriptor base
+ movl %edx, (TSS_SEGMENT+2)(%eax)
+ shr $16, %dx
+ movb %dl, (TSS_SEGMENT + 4)(%eax)
+ movb %dh, (TSS_SEGMENT + 7)(%eax)
+ #
+ # fix GDT descriptor
+ #
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTSIZ
+ .byte 0x48 # dec ax
+ .byte 0x2e
+ movl %eax, (%rdi) # mov cs:[bx], ax
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTPTR
+ .byte 0x2e
+ movw %ax, 2(%rdi)
+ .byte 0x66,0x2e
+ lgdt (%rdi)
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmiCr3): .space 4
+ movq %rax, %cr3
+ .byte 0x66
+ movl $0x668,%eax # as cr4.PGE is not set here, refresh cr3
+ movq %rax, %cr4 # in PreModifyMtrrs() to flush TLB.
+ #
+ # Patch LongMode Segment
+ #
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_CS
+ .byte 0x2e
+ movl %eax, (LongMode - ProtectedMode + 4)(%rdi)
+ #
+ # Patch ProtectedMode Segment
+ #
+ .byte 0xb8
+ .word PROTECT_MODE_CS
+ .byte 0x2e
+ movl %eax, -2(%rdi)
+ #
+ # Patch LongMode entry
+ #
+ .byte 0x66, 0xbf # mov edi, SMBASE
+ASM_PFX(gSmbase): .space 4
+ lea ((LongMode - _SmiEntryPoint) + 0x8000)(%edi), %ax
+ .byte 0x2e
+ movw %ax, (LongMode - ProtectedMode)(%rdi)
+ #
+ # Patch ProtectedMode entry
+ #
+ lea ((ProtectedMode - _SmiEntryPoint) + 0x8000)(%edi), %ax
+ .byte 0x2e
+ movw %ax, -6(%rdi)
+ #
+ # Switch into ProtectedMode
+ #
+ .byte 0x66
+ movl $0xc0000080,%ecx
+
+ movq %cr0, %rbx
+ .byte 0x66
+ andl $0x9ffafff3, %ebx
+ .byte 0x66
+ orl $0x00000023, %ebx
+
+ movq %rbx, %cr0
+ .byte 0x66, 0xea
+ .space 6
+
+_GdtDesc: .space 6
+
+ProtectedMode:
+ movw $PROTECT_MODE_DS, %ax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+
+ #
+ # Load TSS
+ #
+ movl %ebp, %eax # ebp = GDT base
+ movl %eax, %edx
+ movb $0x89, %dl
+ movb %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag
+ movl $TSS_SEGMENT, %eax
+ ltr %ax
+
+ #
+ # Switch to LongMode
+ #
+ rdmsr
+ orb $1,%ah
+ wrmsr
+ btsl $31, %ebx
+ movq %rbx, %cr0
+ .byte 0x67, 0xea
+ .space 6
+
+LongMode: # long mode (64-bit code) starts here
+ lea (DSC_OFFSET)(%rdi), %ebx
+ movw DSC_DS(%rbx), %ax
+ movl %eax,%ds
+ movw DSC_OTHERSEG(%rbx), %ax
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+ movw DSC_SS(%rbx), %ax
+ movl %eax,%ss
+# jmp _SmiHandler ; instruction is not needed
+
+_SmiHandler:
+ .byte 0x48,0xbc # mov rsp, imm64
+ASM_PFX(gSmiStack): .space 8
+ movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug)), %rax
+ cmpb $0, (%rax)
+ jz L1
+
+ jz L3
+
+L3:
+ .byte 0x48, 0x8b, 0x0d # mov rcx, [rip + disp32]
+ .long SSM_DR6 - (. + 4 - _SmiEntryPoint + 0x8000)
+ .byte 0x48, 0x8b, 0x15 # mov rdx, [rip + disp32]
+ .long SSM_DR7 - (. + 4 - _SmiEntryPoint + 0x8000)
+L4:
+ movq %rcx, %dr6
+ movq %rdx, %dr7
+L1:
+
+ movabsq $ASM_PFX(SmiRendezvous), %rax
+ movq (%rsp), %rcx
+ # Save FP registers
+
+ subq $0x208, %rsp
+ .byte 0x48 # FXSAVE64
+ fxsave (%rsp)
+
+ addq $-0x20, %rsp
+ call *%rax
+ addq $0x20, %rsp
+
+ #
+ # Restore FP registers
+ #
+ .byte 0x48 # FXRSTOR64
+ fxrstor (%rsp)
+
+ movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug)), %rax
+ cmpb $0, (%rax)
+ jz L2
+
+ movq %dr7, %rdx
+ movq %dr6, %rcx
+ .byte 0x48, 0x89, 0x15 # mov [rip + disp32], rdx
+ .long SSM_DR7 - (. + 4 - _SmiEntryPoint + 0x8000)
+ .byte 0x48, 0x89, 0x0d # mov [rip + disp32], rcx
+ .long SSM_DR6 - (. + 4 - _SmiEntryPoint + 0x8000)
+L2:
+
+ rsm
+
+ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.asm
new file mode 100644
index 0000000000..7e174b2373
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiEntry.asm
@@ -0,0 +1,232 @@
+;; @file
+; Code template of the SMI handler for a particular processor
+;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+;
+; Constants relating to PROCESSOR_SMM_DESCRIPTOR
+;
+DSC_OFFSET EQU 0fb00h
+DSC_GDTPTR EQU 30h
+DSC_GDTSIZ EQU 38h
+DSC_CS EQU 14
+DSC_DS EQU 16
+DSC_SS EQU 18
+DSC_OTHERSEG EQU 20
+MSR_DR6 EQU 0c05h
+MSR_DR7 EQU 0c06h
+;
+; Constants relating to CPU State Save Area
+;
+SSM_DR6 EQU 0ffd0h
+SSM_DR7 EQU 0ffc8h
+
+;
+; Variables referenced by C code
+;
+EXTERNDEF SmiRendezvous:PROC
+EXTERNDEF gcSmiHandlerTemplate:BYTE
+EXTERNDEF gcSmiHandlerSize:WORD
+EXTERNDEF gSmiCr3:DWORD
+EXTERNDEF gcSmiHandlerOffset:WORD
+EXTERNDEF gSmiStack:QWORD
+EXTERNDEF gSmbase:DWORD
+EXTERNDEF FeaturePcdGet (PcdCpuSmmDebug):BYTE
+
+PROTECT_MODE_CS EQU 10h
+PROTECT_MODE_DS EQU 18h
+TSS_SEGMENT EQU 40h
+GDT_SIZE EQU 50h
+
+ .const
+
+gcSmiHandlerOffset DW _SmiHandler - _SmiEntryPoint + 8000h
+
+ .code
+
+gcSmiHandlerTemplate LABEL BYTE
+
+_SmiEntryPoint PROC
+ ;
+ ; The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
+ ; bit addressing mode. And that coincidence has been used in the following
+ ; "64-bit like" 16-bit code. Be aware that once RDI is referrenced as a
+ ; base address register, it is actually BX that is referrenced.
+ ;
+ DB 0bbh ; mov bx, imm16
+ DW offset _GdtDesc - _SmiEntryPoint + 8000h ; bx = GdtDesc offset
+
+; fix GDT TSS table
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16] ; eax = GDT base
+ DW DSC_OFFSET + DSC_GDTPTR
+ mov dx, ax
+ mov bp, ax ; ebp = GDT base
+ add dx, GDT_SIZE ; edx = TSS descriptor base
+ mov [eax + TSS_SEGMENT + 2],edx
+ shr dx, 16
+ mov [eax + TSS_SEGMENT + 4], dl
+ mov [eax + TSS_SEGMENT + 7], dh
+; fix GDT descriptor
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTSIZ
+ DB 48h ; dec ax
+ DB 2eh
+ mov [rdi], eax ; mov cs:[bx], ax
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTPTR
+ DB 2eh
+ mov [rdi + 2], ax ; mov cs:[bx + 2], eax
+ DB 66h, 2eh
+ lgdt fword ptr [rdi] ; lgdt fword ptr cs:[bx]
+ DB 66h, 0b8h ; mov eax, imm32
+gSmiCr3 DD ?
+ mov cr3, rax
+ DB 66h
+ mov eax, 668h ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+; Patch LongMode Segment
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_CS
+ DB 2eh
+ mov [rdi + (sizeof (FWORD) + (@LongMode - @ProtectedMode)) - 2], eax ; mov cs:[bx - 2], ax
+; Patch ProtectedMode Segment
+ DB 0b8h ; mov ax, imm16
+ DW PROTECT_MODE_CS ; set AX for segment directly
+ DB 2eh
+ mov [rdi - 2], eax ; mov cs:[bx - 2], ax
+; Patch LongMode entry
+ DB 66h, 0bfh ; mov edi, SMBASE
+gSmbase DD ?
+ lea ax, [edi + (@LongMode - _SmiEntryPoint) + 8000h]
+ DB 2eh
+ mov [rdi + (sizeof (FWORD) + (@LongMode - @ProtectedMode)) - 6], ax ; mov cs:[bx - 6], eax
+; Patch ProtectedMode entry
+ lea ax, [edi + (@ProtectedMode - _SmiEntryPoint) + 8000h]
+ DB 2eh
+ mov [rdi - 6], ax ; mov cs:[bx - 6], eax
+; Switch into @ProtectedMode
+ DB 66h
+ mov ecx, 0c0000080h
+ mov rbx, cr0
+ DB 66h
+ and ebx, 9ffafff3h
+ DB 66h
+ or ebx, 00000023h
+
+ mov cr0, rbx
+ DB 66h, 0eah
+ DD ?
+ DW ?
+
+_GdtDesc FWORD ?
+@ProtectedMode:
+ mov ax, PROTECT_MODE_DS
+ mov ds, eax
+ mov es, eax
+ mov ss, eax
+
+; Load TSS
+ mov eax, ebp ; ebp = GDT base
+ mov edx, eax
+ mov dl, 89h
+ mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+
+ mov eax, TSS_SEGMENT
+ ltr ax
+
+; Switch into @LongMode
+ rdmsr
+ or ah, 1
+ wrmsr
+ bts ebx, 31
+ mov cr0, rbx
+ DB 67h, 0eah
+ DD ?
+ DW ?
+
+@LongMode: ; long mode (64-bit code) starts here
+ lea ebx, [rdi + DSC_OFFSET]
+ mov ax, [rbx + DSC_DS]
+ mov ds, eax
+ mov ax, [rbx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [rbx + DSC_SS]
+ mov ss, eax
+; jmp _SmiHandler ; instruction is not needed
+_SmiEntryPoint ENDP
+
+_SmiHandler PROC
+ DB 48h, 0bch ; mov rsp, imm64
+gSmiStack DQ ?
+;
+; The following lines restore DR6 & DR7 before running C code. They are useful
+; when you want to enable hardware breakpoints in SMM without setting
+; smmentrybreak=true in ITP.
+;
+; NOTE: These lines might not be appreciated in runtime since they might
+; conflict with OS debugging facilities. Turn them off in RELEASE.
+;
+ mov rax, offset FeaturePcdGet (PcdCpuSmmDebug) ;Get absolute address. Avoid RIP relative addressing
+ cmp byte ptr [rax], 0
+ jz @1
+
+ DB 48h, 8bh, 0dh ; mov rcx, [rip + disp32]
+ DD SSM_DR6 - ($ + 4 - _SmiEntryPoint + 8000h)
+ DB 48h, 8bh, 15h ; mov rdx, [rip + disp32]
+ DD SSM_DR7 - ($ + 4 - _SmiEntryPoint + 8000h)
+ mov dr6, rcx
+ mov dr7, rdx
+@1:
+ mov rcx, [rsp] ; rcx <- CpuIndex
+ mov rax, SmiRendezvous ; rax <- absolute addr of SmiRendezvous
+
+ ;
+ ; Save FP registers
+ ;
+ sub rsp, 208h
+ DB 48h ; FXSAVE64
+ fxsave [rsp]
+
+ add rsp, -20h
+ call rax
+ add rsp, 20h
+
+ ;
+ ; Restore FP registers
+ ;
+ DB 48h ; FXRSTOR64
+ fxrstor [rsp]
+
+ mov rax, offset FeaturePcdGet (PcdCpuSmmDebug) ;Get absolute address. Avoid RIP relative addressing
+ cmp byte ptr [rax], 0
+ jz @2
+
+ mov rdx, dr7
+ mov rcx, dr6
+ DB 48h, 89h, 15h ; mov [rip + disp32], rdx
+ DD SSM_DR7 - ($ + 4 - _SmiEntryPoint + 8000h)
+ DB 48h, 89h, 0dh ; mov [rip + disp32], rcx
+ DD SSM_DR6 - ($ + 4 - _SmiEntryPoint + 8000h)
+@2:
+ rsm
+_SmiHandler ENDP
+
+gcSmiHandlerSize DW $ - _SmiEntryPoint
+
+ END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.S
new file mode 100644
index 0000000000..e786a4395d
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.S
@@ -0,0 +1,822 @@
+## @file
+# Exception handlers used in SM mode
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+
+ASM_GLOBAL ASM_PFX(gSmiMtrrs)
+ASM_GLOBAL ASM_PFX(gcSmiIdtr)
+ASM_GLOBAL ASM_PFX(gcSmiGdtr)
+ASM_GLOBAL ASM_PFX(gcPsd)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ASM_GLOBAL ASM_PFX(gSavedPageFaultIdtEntry)
+ASM_GLOBAL ASM_PFX(gSavedDebugExceptionIdtEntry)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+ASM_GLOBAL ASM_PFX(InitializeSmmExternalVectorTablePtr)
+
+ .data
+
+NullSeg: .quad 0
+ .quad 0 # reserved for future use
+CodeSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+DataSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x93
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+ .quad 0 # reserved for future use
+CodeSeg16:
+ .word -1
+ .word 0
+ .byte 0
+ .byte 0x9b
+ .byte 0x8f
+ .byte 0
+DataSeg16:
+ .word -1
+ .word 0
+ .byte 0
+ .byte 0x93
+ .byte 0x8f
+ .byte 0
+CodeSeg64:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xaf # LimitHigh
+ .byte 0 # BaseHigh
+# TSS segment, specific to X64
+TssSeg:
+ .word TSS_DESC_SIZE # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x89
+ .byte 0xDB # LimitHigh
+ .byte 0 # BaseHigh
+ .long 0 # BaseUpper
+ .long 0 # Reserved
+.equ GDT_SIZE, .- NullSeg
+
+TssDescriptor:
+ .space 104, 0
+.equ TSS_DESC_SIZE, .- TssDescriptor
+
+#
+# This structure serves as a template for all processors.
+#
+ASM_PFX(gcPsd):
+ .ascii "PSDSIG "
+ .word PSD_SIZE
+ .word 2
+ .word 1 << 2
+ .word CODE_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word 0
+ .quad 0
+ .quad 0
+ .quad 0 # fixed in InitializeMpServiceData()
+ .quad NullSeg
+ .long GDT_SIZE
+ .long 0
+ .space 24, 0
+ .quad ASM_PFX(gSmiMtrrs)
+.equ PSD_SIZE, . - ASM_PFX(gcPsd)
+
+#
+# CODE & DATA segments for SMM runtime
+#
+.equ CODE_SEL, CodeSeg64 - NullSeg
+.equ DATA_SEL, DataSeg32 - NullSeg
+
+ASM_PFX(gcSmiGdtr):
+ .word GDT_SIZE - 1
+ .quad NullSeg
+
+ASM_PFX(gcSmiIdtr):
+ .word IDT_SIZE - 1
+ .quad _SmiIDT
+
+
+#
+# Here is the IDT. There are 32 (not 255) entries in it since only processor-
+# generated exceptions will be handled.
+#
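+# Vectors 0-31 are the exception vectors reserved by the architecture.
+#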
+_SmiIDT:
+# The following entry repeats 32 times:
+.rept 32
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+ .quad 0 # Offset 32:63
+.endr
+
+_SmiIDTEnd:
+
+.equ IDT_SIZE, (_SmiIDTEnd - _SmiIDT)
+
+#
+# Saved IDT Entry for Page Fault
+#
+ASM_PFX(gSavedPageFaultIdtEntry):
+ .quad 0
+ .quad 0
+
+#
+# Saved IDT Entry for INT 1
+#
+ASM_PFX(gSavedDebugExceptionIdtEntry):
+ .quad 0
+ .quad 0
+
+ExternalVectorTablePtr: .quad 0 # point to the external interrupt vector table
+
+#
+# Here are the global variables used by #PF exception handler.
+#
+_PFPML4: .long 0
+_PFPDP: .long 0
+_PFLOCK: .byte 0
+
+ .text
+
+ASM_PFX(InitializeSmmExternalVectorTablePtr):
+ movq %rcx, ExternalVectorTablePtr(%rip)
+ ret
+
+#------------------------------------------------------------------------------
+# _SmiExceptionEntryPoints is the collection of exception entrypoints followed
+# by a common exception handler.
+#
+# The stack frame is as follows, as specified in the IA-32 manuals:
+# +---------------------+ <-- 16-byte aligned ensured by processor
+# + Old SS +
+# +---------------------+
+# + Old RSP +
+# +---------------------+
+# + RFlags +
+# +---------------------+
+# + CS +
+# +---------------------+
+# + RIP +
+# +---------------------+
+# + Error Code +
+# +---------------------+
+# + Vector Number +
+# +---------------------+
+# + RBP +
+# +---------------------+ <-- RBP, 16-byte aligned
+#
+# RSP set to an odd multiple of 8 at CommonEntryPoint means ErrCode PRESENT
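+#
+# (The processor aligns RSP to 16 bytes before pushing the frame: six qwords
+# are pushed when an error code is present versus five when not, so after the
+# vector number is pushed below, bit 3 of RSP is set exactly when an ErrCode
+# is present. The "test spl, 8" at CommonEntryPoint relies on this.)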
+#------------------------------------------------------------------------------
+_SmiExceptionEntryPoints:
+.equ IHDLRIDX, 0
+# The following stub repeats 31 times (handlers for INT0~INT30):
+.rept 31
+ pushq $IHDLRIDX
+ jmp CommonEntryPoint
+.equ IHDLRIDX, IHDLRIDX + 1
+.endr
+
+ pushq $IHDLRIDX # handler for INT31
+CommonEntryPoint:
+ .byte 0x40, 0xf6, 0xc4, 0x08 #test spl, 8
+ jnz L1
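+ # no ErrCode: duplicate the vector number into the slot below and store a
+ # dummy ErrCode of 0 so both cases share the same frame layout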
+ pushq (%rsp)
+ movq $0, 8(%rsp)
+L1:
+ pushq %rbp
+ movq %rsp, %rbp
+
+ #
+ # Since here the stack pointer is 16-byte aligned, so
+ # EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
+ # is 16-byte aligned
+ #
+
+## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+## UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %r11
+ pushq %r10
+ pushq %r9
+ pushq %r8
+ pushq %rax
+ pushq %rcx
+ pushq %rdx
+ pushq %rbx
+ pushq 48(%rbp) # RSP
+ pushq (%rbp) # RBP
+ pushq %rsi
+ pushq %rdi
+
+## UINT64 Gs, Fs, Es, Ds, Cs, Ss; ensure the upper bits of each are zero
+ movzwq 56(%rbp), %rax
+ pushq %rax # for ss
+ movzwq 32(%rbp), %rax
+ pushq %rax # for cs
+ movq %ds, %rax
+ pushq %rax
+ movq %es, %rax
+ pushq %rax
+ movq %fs, %rax
+ pushq %rax
+ movq %gs, %rax
+ pushq %rax
+
+## UINT64 Rip;
+ pushq 24(%rbp)
+
+## UINT64 Gdtr[2], Idtr[2];
+ subq $16, %rsp
+ sidt (%rsp)
+ subq $16, %rsp
+ sgdt (%rsp)
+
+## UINT64 Ldtr, Tr;
+ xorq %rax, %rax
+ strw %ax
+ pushq %rax
+ sldtw %ax
+ pushq %rax
+
+## UINT64 RFlags;
+ pushq 40(%rbp)
+
+## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ movq %cr8, %rax
+ pushq %rax
+ movq %cr4, %rax
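+ # 0x208 = CR4.DE (bit 3) | CR4.OSFXSR (bit 9)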
+ orq $0x208, %rax
+ movq %rax, %cr4
+ pushq %rax
+ movq %cr3, %rax
+ pushq %rax
+ movq %cr2, %rax
+ pushq %rax
+ xorq %rax, %rax
+ pushq %rax
+ movq %cr0, %rax
+ pushq %rax
+
+## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ movq %dr7, %rax
+ pushq %rax
+ movq %dr6, %rax
+ pushq %rax
+ movq %dr3, %rax
+ pushq %rax
+ movq %dr2, %rax
+ pushq %rax
+ movq %dr1, %rax
+ pushq %rax
+ movq %dr0, %rax
+ pushq %rax
+
+## FX_SAVE_STATE_X64 FxSaveState;
+
+ subq $512, %rsp
+ movq %rsp, %rdi
+ .byte 0xf, 0xae, 0x7 # fxsave [rdi]
+
+# The UEFI x64 calling convention requires that the Direction flag in EFLAGS be clear
+ cld
+
+## UINT32 ExceptionData;
+ pushq 16(%rbp)
+
+## call into exception handler
+ movq 8(%rbp), %rcx
+ movq ExternalVectorTablePtr, %rax # get the interrupt vectors base
+ movq (%rax, %rcx, 8), %rax
+ orq %rax, %rax # NULL?
+
+ je nullValue
+
+## Prepare parameter and call
+ movq %rsp, %rdx
+ #
+ # Per X64 calling convention, allocate maximum parameter stack space
+ # and make sure RSP is 16-byte aligned
+ #
+ subq $4 * 8 + 8, %rsp
+ call *%rax
+ addq $4 * 8 + 8, %rsp
+ jmp L5
+
+nullValue:
+# CpuDeadLoop() is the default exception handler since it preserves the processor
+# branch log.
+ call ASM_PFX(CpuDeadLoop)
+
+L5:
+## UINT64 ExceptionData;
+ addq $8, %rsp
+
+## FX_SAVE_STATE_X64 FxSaveState;
+
+ movq %rsp, %rsi
+ .byte 0xf, 0xae, 0xe # fxrstor [rsi]
+ addq $512, %rsp
+
+## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+## Skip restoration of DRx registers to support in-circuit emulators
+## or debuggers that set breakpoints in interrupt/exception context
+ addq $8 * 6, %rsp
+
+## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ popq %rax
+ movq %rax, %cr0
+ addq $8, %rsp # not for Cr1
+ popq %rax
+ movq %rax, %cr2
+ popq %rax
+ movq %rax, %cr3
+ popq %rax
+ movq %rax, %cr4
+ popq %rax
+ movq %rax, %cr8
+
+## UINT64 RFlags;
+ popq 40(%rbp)
+
+## UINT64 Ldtr, Tr;
+## UINT64 Gdtr[2], Idtr[2];
+## Best not let anyone mess with these particular registers...
+ addq $48, %rsp
+
+## UINT64 Rip;
+ popq 24(%rbp)
+
+## UINT64 Gs, Fs, Es, Ds, Cs, Ss;
+ popq %rax
+ # mov gs, rax ; not for gs
+ popq %rax
+ # mov fs, rax ; not for fs
+ # (X64 will not use fs and gs, so we do not restore them)
+ popq %rax
+ movq %rax, %es
+ popq %rax
+ movq %rax, %ds
+ popq 32(%rbp) # for cs
+ popq 56(%rbp) # for ss
+
+## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+## UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ popq %rdi
+ popq %rsi
+ addq $8, %rsp # not for rbp
+ popq 48(%rbp) # for rsp
+ popq %rbx
+ popq %rdx
+ popq %rcx
+ popq %rax
+ popq %r8
+ popq %r9
+ popq %r10
+ popq %r11
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+
+ movq %rbp, %rsp
+
+# Set single step DB# if SMM profile is enabled and page fault exception happens
+ movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable)), %rbp
+ cmpb $0, (%rbp)
+ jz Done
+# Check if this is page fault exception
+ cmpq $0xe, 8(%rsp)
+ jnz L6
+# Enable TF bit after page fault handler runs
+ btsl $8, 40(%rsp) #RFLAGS
+ jmp Done
+L6:
+# Check if this is INT 1 exception
+ cmpq $1, 8(%rsp)
+ jnz Done
+# Clear TF bit after INT1 handler runs
+ btcl $8, 40(%rsp) #RFLAGS
+
+Done:
+
+ popq %rbp
+ addq $16, %rsp # skip INT# & ErrCode
+ iretq
+
+ASM_GLOBAL ASM_PFX(InitializeIDT)
+ASM_PFX(InitializeIDT):
+ movl $((_SmiIDTEnd - _SmiIDT) >> 2), %ecx
+ movabsq $_SmiIDT - 16, %rdx
+ movabsq $_SmiExceptionEntryPoints - 4, %r10
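+# Each entry stub above is 4 bytes (push imm8 + jmp rel8) and each IDT entry
+# is 16 bytes: RCX counts down by 4 per entry, so RDX + RCX*4 addresses the
+# IDT entry and R10 + RCX addresses its stub.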
+L2:
+ lea (%r10, %rcx), %rax
+ movw %ax, (%rdx, %rcx, 4)
+ shr $16, %rax
+ movw %ax, 6(%rdx, %rcx, 4)
+ shr $16, %rax
+ movl %eax, 8(%rdx, %rcx, 4)
+ addl $-4, %ecx
+ jnz L2
+
+
+# Get absolute address.
+ movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard)), %rax
+ cmpb $0, (%rax)
+ jz L3
+
+#
+# If the SMM Stack Guard feature is enabled, set the IST field of
+# the interrupt gate for the Page Fault Exception to 1
+#
+ movabsq $_SmiIDT + 14 * 16, %rax
+ movb $1, 4(%rax)
+L3:
+#
+# Save the Page Fault IDT entry in gSavedPageFaultIdtEntry
+#
+ movabsq $_SmiIDT + 14 * 16, %rcx
+ movabsq $ASM_PFX(gSavedPageFaultIdtEntry), %rdx
+ movq (%rcx), %rax
+ movq %rax, (%rdx)
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rdx)
+
+ movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable)), %rax
+ cmpb $0, (%rax)
+ jz L4
+
+#
+# Save INT 1 IDT entry in gSavedDebugExceptionIdtEntry
+#
+ movabsq $_SmiIDT + 1 * 16, %rcx
+ movabsq $ASM_PFX(gSavedDebugExceptionIdtEntry), %rdx
+ movq (%rcx), %rax
+ movq %rax, (%rdx)
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rdx)
+
+L4:
+ ret
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.asm
new file mode 100644
index 0000000000..551f179848
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmiException.asm
@@ -0,0 +1,504 @@
+;; @file
+; Exception handlers used in SM mode
+;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+EXTERNDEF gSmiMtrrs:QWORD
+EXTERNDEF gcSmiIdtr:FWORD
+EXTERNDEF gcSmiGdtr:FWORD
+EXTERNDEF gcPsd:BYTE
+EXTERNDEF gPhyMask:QWORD
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
+EXTERNDEF gSavedPageFaultIdtEntry:QWORD
+EXTERNDEF gSavedDebugExceptionIdtEntry:QWORD
+EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE
+
+CpuDeadLoop PROTO
+
+ .const
+
+NullSeg DQ 0 ; reserved by architecture
+ DQ 0 ; reserved for future use
+CodeSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 93h
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+ DQ 0 ; reserved for future use
+CodeSeg16 LABEL QWORD
+ DW -1
+ DW 0
+ DB 0
+ DB 9bh
+ DB 8fh
+ DB 0
+DataSeg16 LABEL QWORD
+ DW -1
+ DW 0
+ DB 0
+ DB 93h
+ DB 8fh
+ DB 0
+CodeSeg64 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0afh ; LimitHigh
+ DB 0 ; BaseHigh
+; TSS segment, specific to X64
+TssSeg LABEL QWORD
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 89h
+ DB 080h ; LimitHigh
+ DB 0 ; BaseHigh
+ DD 0 ; BaseUpper
+ DD 0 ; Reserved
+GDT_SIZE = $ - offset NullSeg
+
+; Create TSS Descriptor just after GDT
+TssDescriptor LABEL BYTE
+ DD 0 ; Reserved
+ DQ 0 ; RSP0
+ DQ 0 ; RSP1
+ DQ 0 ; RSP2
+ DD 0 ; Reserved
+ DD 0 ; Reserved
+ DQ 0 ; IST1
+ DQ 0 ; IST2
+ DQ 0 ; IST3
+ DQ 0 ; IST4
+ DQ 0 ; IST5
+ DQ 0 ; IST6
+ DQ 0 ; IST7
+ DD 0 ; Reserved
+ DD 0 ; Reserved
+ DW 0 ; Reserved
+ DW 0 ; I/O Map Base Address
+TSS_DESC_SIZE = $ - offset TssDescriptor
+
+;
+; This structure serves as a template for all processors.
+;
+gcPsd LABEL BYTE
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 SHL 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0 ; fixed in InitializeMpServiceData()
+ DQ offset NullSeg
+ DD GDT_SIZE
+ DD 0
+ DB 24 dup (0)
+ DQ offset gSmiMtrrs
+PSD_SIZE = $ - offset gcPsd
+
+;
+; CODE & DATA segments for SMM runtime
+;
+CODE_SEL = offset CodeSeg64 - offset NullSeg
+DATA_SEL = offset DataSeg32 - offset NullSeg
+
+gcSmiGdtr LABEL FWORD
+ DW GDT_SIZE - 1
+ DQ offset NullSeg
+
+gcSmiIdtr LABEL FWORD
+ DW IDT_SIZE - 1
+ DQ offset _SmiIDT
+
+ .data
+
+;
+; Here is the IDT. There are 32 (not 255) entries in it since only processor-
+; generated exceptions will be handled.
+;
+_SmiIDT:
+REPEAT 32
+ DW 0 ; Offset 0:15
+ DW CODE_SEL ; Segment selector
+ DB 0 ; Unused
+ DB 8eh ; Interrupt Gate, Present
+ DW 0 ; Offset 16:31
+ DQ 0 ; Offset 32:63
+ ENDM
+_SmiIDTEnd:
+
+IDT_SIZE = (offset _SmiIDTEnd - offset _SmiIDT)
+
+;
+; Saved IDT Entry for Page Fault
+;
+gSavedPageFaultIdtEntry LABEL QWORD
+ DQ 0
+ DQ 0
+
+;
+; Saved IDT Entry for INT 1
+;
+gSavedDebugExceptionIdtEntry LABEL QWORD
+ DQ 0
+ DQ 0
+
+ExternalVectorTablePtr QWORD 0 ; point to the external interrupt vector table
+
+;
+; Here are the global variables used by #PF exception handler.
+;
+_PFPML4 DD 0
+_PFPDP DD 0
+_PFLOCK DB 0
+
+ .code
+
+InitializeSmmExternalVectorTablePtr PROC
+ mov ExternalVectorTablePtr, rcx
+ ret
+InitializeSmmExternalVectorTablePtr ENDP
+
+;------------------------------------------------------------------------------
+; _SmiExceptionEntryPoints is the collection of exception entrypoints followed
+; by a common exception handler.
+;
+; The stack frame is as follows, as specified in the IA-32 manuals:
+;
+; +---------------------+ <-- 16-byte aligned ensured by processor
+; + Old SS +
+; +---------------------+
+; + Old RSP +
+; +---------------------+
+; + RFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + RIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + RBP +
+; +---------------------+ <-- RBP, 16-byte aligned
+;
+; RSP set to odd multiple of 8 at @CommonEntryPoint means ErrCode PRESENT
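+;
+; (The processor aligns RSP to 16 bytes before pushing the frame: six qwords
+; are pushed when an ErrCode is present versus five when not, so after the
+; INT# push bit 3 of RSP is set exactly when an ErrCode is present. The
+; "test spl, 8" at @CommonEntryPoint relies on this.)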
+;------------------------------------------------------------------------------
+_SmiExceptionEntryPoints PROC
+IHDLRIDX = 0
+REPEAT 31 ; handler for INT0~INT30
+ push IHDLRIDX
+ jmp @CommonEntryPoint
+IHDLRIDX = IHDLRIDX + 1
+ ENDM
+ push IHDLRIDX ; handler for INT31
+@CommonEntryPoint:
+ test spl, 8 ; odd multiple of 8 => ErrCode present
+ jnz @F
+ push [rsp] ; duplicate INT# if no ErrCode
+ mov qword ptr [rsp + 8], 0
+@@:
+ push rbp
+ mov rbp, rsp
+
+ ;
+ ; Since here the stack pointer is 16-byte aligned, so
+ ; EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
+ ; is 16-byte aligned
+ ;
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ push r15
+ push r14
+ push r13
+ push r12
+ push r11
+ push r10
+ push r9
+ push r8
+ push rax
+ push rcx
+ push rdx
+ push rbx
+ push qword ptr [rbp + 48] ; RSP
+ push qword ptr [rbp] ; RBP
+ push rsi
+ push rdi
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss; ensure the upper bits of each are zero
+ movzx rax, word ptr [rbp + 56]
+ push rax ; for ss
+ movzx rax, word ptr [rbp + 32]
+ push rax ; for cs
+ mov rax, ds
+ push rax
+ mov rax, es
+ push rax
+ mov rax, fs
+ push rax
+ mov rax, gs
+ push rax
+
+;; UINT64 Rip;
+ push qword ptr [rbp + 24]
+
+;; UINT64 Gdtr[2], Idtr[2];
+ sub rsp, 16
+ sidt fword ptr [rsp]
+ sub rsp, 16
+ sgdt fword ptr [rsp]
+
+;; UINT64 Ldtr, Tr;
+ xor rax, rax
+ str ax
+ push rax
+ sldt ax
+ push rax
+
+;; UINT64 RFlags;
+ push qword ptr [rbp + 40]
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ mov rax, cr8
+ push rax
+ mov rax, cr4
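+ ; 208h = CR4.DE (bit 3) + CR4.OSFXSR (bit 9)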
+ or rax, 208h
+ mov cr4, rax
+ push rax
+ mov rax, cr3
+ push rax
+ mov rax, cr2
+ push rax
+ xor rax, rax
+ push rax
+ mov rax, cr0
+ push rax
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov rax, dr7
+ push rax
+ mov rax, dr6
+ push rax
+ mov rax, dr3
+ push rax
+ mov rax, dr2
+ push rax
+ mov rax, dr1
+ push rax
+ mov rax, dr0
+ push rax
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ sub rsp, 512
+ mov rdi, rsp
+ db 0fh, 0aeh, 00000111y ;fxsave [rdi]
+
+; The UEFI x64 calling convention requires that the Direction flag in EFLAGS be clear
+ cld
+
+;; UINT32 ExceptionData;
+ push qword ptr [rbp + 16]
+
+;; call into exception handler
+ mov rcx, [rbp + 8]
+ mov rax, ExternalVectorTablePtr ; get the interrupt vectors base
+ mov rax, [rax + rcx * 8]
+ or rax, rax ; NULL?
+
+ je nullValue
+
+;; Prepare parameter and call
+ mov rdx, rsp
+ ;
+ ; Per X64 calling convention, allocate maximum parameter stack space
+ ; and make sure RSP is 16-byte aligned
+ ;
+ sub rsp, 4 * 8 + 8
+ call rax
+ add rsp, 4 * 8 + 8
+ jmp @F
+
+nullValue:
+; CpuDeadLoop() is the default exception handler since it preserves the processor
+; branch log.
+ call CpuDeadLoop
+
+@@:
+;; UINT64 ExceptionData;
+ add rsp, 8
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ mov rsi, rsp
+ db 0fh, 0aeh, 00001110y ; fxrstor [rsi]
+ add rsp, 512
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support in-circuit emulators
+;; or debuggers that set breakpoints in interrupt/exception context
+ add rsp, 8 * 6
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ pop rax
+ mov cr0, rax
+ add rsp, 8 ; not for Cr1
+ pop rax
+ mov cr2, rax
+ pop rax
+ mov cr3, rax
+ pop rax
+ mov cr4, rax
+ pop rax
+ mov cr8, rax
+
+;; UINT64 RFlags;
+ pop qword ptr [rbp + 40]
+
+;; UINT64 Ldtr, Tr;
+;; UINT64 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add rsp, 48
+
+;; UINT64 Rip;
+ pop qword ptr [rbp + 24]
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
+ pop rax
+ ; mov gs, rax ; not for gs
+ pop rax
+ ; mov fs, rax ; not for fs
+ ; (X64 will not use fs and gs, so we do not restore them)
+ pop rax
+ mov es, rax
+ pop rax
+ mov ds, rax
+ pop qword ptr [rbp + 32] ; for cs
+ pop qword ptr [rbp + 56] ; for ss
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ pop rdi
+ pop rsi
+ add rsp, 8 ; not for rbp
+ pop qword ptr [rbp + 48] ; for rsp
+ pop rbx
+ pop rdx
+ pop rcx
+ pop rax
+ pop r8
+ pop r9
+ pop r10
+ pop r11
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+ mov rsp, rbp
+
+; Set single step DB# if SMM profile is enabled and page fault exception happens
+ mov rbp, offset FeaturePcdGet (PcdCpuSmmProfileEnable)
+ cmp byte ptr [rbp], 0
+ jz @Done
+; Check if this is page fault exception
+ cmp qword ptr [rsp + 8], 0eh
+ jnz @F
+; Enable TF bit after page fault handler runs
+ bts dword ptr [rsp + 40], 8 ;RFLAGS
+ jmp @Done
+@@:
+; Check if this is INT 1 exception
+ cmp qword ptr [rsp + 8], 1
+ jnz @Done
+; Clear TF bit after INT1 handler runs
+ btc dword ptr [rsp + 40], 8 ;RFLAGS
+
+@Done:
+
+ pop rbp
+ add rsp, 16 ; skip INT# & ErrCode
+ iretq
+_SmiExceptionEntryPoints ENDP
+
+InitializeIDT PROC
+ mov ecx, (_SmiIDTEnd - _SmiIDT) SHR 2
+ lea rdx, _SmiIDT - 16
+ lea r10, _SmiExceptionEntryPoints - 4
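+; Each entry stub above is 4 bytes (push imm8 + jmp rel8) and each IDT entry
+; is 16 bytes: RCX counts down by 4 per entry, so RDX + RCX*4 addresses the
+; IDT entry and R10 + RCX addresses its stub.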
+@@:
+ lea rax, [r10 + rcx] ; rax <- handler address
+ mov [rdx + rcx*4], ax
+ shr rax, 16
+ mov [rdx + rcx*4 + 6], ax
+ shr rax, 16
+ mov [rdx + rcx*4 + 8], eax
+ add ecx, -4
+ jnz @B
+
+; Get absolute address. Avoid RIP relative addressing
+ mov rax, offset FeaturePcdGet (PcdCpuSmmStackGuard)
+ cmp byte ptr [rax], 0
+ jz @F
+
+;
+; If the SMM Stack Guard feature is enabled, set the IST field of
+; the interrupt gate for the Page Fault Exception to 1
+;
+ lea rax, _SmiIDT + 14 * 16
+ mov byte ptr [rax + 4], 1
+@@:
+;
+; Save the Page Fault IDT entry in gSavedPageFaultIdtEntry
+;
+ lea rcx, _SmiIDT + 14 * 16
+ lea rdx, gSavedPageFaultIdtEntry
+ mov rax, [rcx]
+ mov [rdx], rax
+ mov rax, [rcx + 8]
+ mov [rdx + 8], rax
+
+ mov rax, offset FeaturePcdGet (PcdCpuSmmProfileEnable)
+ cmp byte ptr [rax], 0
+ jz @F
+
+;
+; Save INT 1 IDT entry in gSavedDebugExceptionIdtEntry
+;
+ lea rcx, _SmiIDT + 1 * 16
+ lea rdx, gSavedDebugExceptionIdtEntry
+ mov rax, [rcx]
+ mov [rdx], rax
+ mov rax, [rcx + 8]
+ mov [rdx + 8], rax
+@@:
+ ret
+InitializeIDT ENDP
+
+ END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.S b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.S
new file mode 100644
index 0000000000..1d1c7a5a2f
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.S
@@ -0,0 +1,123 @@
+## @file
+# Functions for relocating SMBASE's for all processors
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED
+#
+##
+
+
+ASM_GLOBAL ASM_PFX(gSmmCr0)
+ASM_GLOBAL ASM_PFX(gSmmCr3)
+ASM_GLOBAL ASM_PFX(gSmmCr4)
+ASM_GLOBAL ASM_PFX(gcSmmInitTemplate)
+ASM_GLOBAL ASM_PFX(gcSmmInitSize)
+ASM_GLOBAL ASM_PFX(gSmmJmpAddr)
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete32)
+ASM_GLOBAL ASM_PFX(mRebasedFlagAddr32)
+ASM_GLOBAL ASM_PFX(mSmmRelocationOriginalAddressPtr32)
+ASM_GLOBAL ASM_PFX(gSmmInitStack)
+
+ .data
+
+NullSeg: .quad 0
+CodeSeg64:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xaf # LimitHigh
+ .byte 0 # BaseHigh
+.equ GDT_SIZE, . - NullSeg
+
+ .text
+
+GdtDesc:
+ .word GDT_SIZE
+ .quad NullSeg
+
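+# SmmStartup executes in the real-mode-like environment entered on an SMI.
+# The DB-encoded "mov eax, imm32" instructions below have their immediates
+# (gSmmCr3, gSmmCr4, gSmmCr0) patched at runtime with the control register
+# values to load, after which the code switches directly into long mode.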
+SmmStartup:
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmmCr3): .space 4
+ movq %rax, %cr3
+ .byte 0x66,0x2e
+ lgdt (GdtDesc - SmmStartup)(%ebp)
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmmCr4): .space 4
+ movq %rax, %cr4
+ .byte 0x66
+ movl $0xc0000080,%ecx # IA32_EFER MSR
+ rdmsr
+ orb $1,%ah # set LME bit
+ wrmsr
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmmCr0): .space 4
+ movq %rax, %cr0
+ .byte 0x66,0xea # far jmp to long mode
+ASM_PFX(gSmmJmpAddr): .quad LongMode
+LongMode: # long-mode starts here
+ .byte 0x48,0xbc # mov rsp, imm64
+ASM_PFX(gSmmInitStack): .space 8
+ andw $0xfff0, %sp # make sure RSP is 16-byte aligned
+ addq $-0x20, %rsp
+ call ASM_PFX(SmmInitHandler)
+ addq $0x20, %rsp
+ rsm
+
+ASM_PFX(gcSmmInitTemplate):
+
+_SmmInitTemplate:
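+# A sketch of what these hand-assembled bytes do, assuming the template is
+# copied to the default SMBASE (0x30000) and entered at SMBASE + 0x8000:
+# load EBP with the flat address of SmmStartup stored at L1, subtract the
+# default SMBASE to compensate for the real-mode CS base, then jump to
+# SmmStartup via EBP.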
+ .byte 0x66,0x2e,0x8b,0x2e # mov ebp, cs:[@F]
+ .word L1 - _SmmInitTemplate + 0x8000
+ .byte 0x66, 0x81, 0xed, 0, 0, 3, 0 # sub ebp, 0x30000
+ jmp *%bp # jmp ebp actually
+L1:
+ .quad SmmStartup
+
+ASM_PFX(gcSmmInitSize): .word . - ASM_PFX(gcSmmInitTemplate)
+
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ # Create a simple stack frame to store RAX and the original RSM location
+ pushq %rax # Used to store return address
+ pushq %rax
+
+ # Load the original RSM location onto stack
+ movabsq $ASM_PFX(mSmmRelocationOriginalAddress), %rax
+ movq (%rax), %rax
+ movq %rax, 0x08(%rsp)
+
+ # Update rebase flag
+ movabsq $ASM_PFX(mRebasedFlag), %rax
+ movq (%rax), %rax
+ movb $1, (%rax)
+
+ #restore RAX and return to original RSM location
+ popq %rax
+ retq
+
+#
+# Semaphore code running in 32-bit mode
+#
+ASM_PFX(SmmRelocationSemaphoreComplete32):
+ #
+ # movb $1, ()
+ #
+ .byte 0xc6, 0x05
+ASM_PFX(mRebasedFlagAddr32):
+ .long 0
+ .byte 1
+ #
+ # jmpd ()
+ #
+ .byte 0xff, 0x25
+ASM_PFX(mSmmRelocationOriginalAddressPtr32):
+ .long 0
+
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.asm b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.asm
new file mode 100644
index 0000000000..b5724d53c6
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmInit.asm
@@ -0,0 +1,140 @@
+;; @file
+; Functions for relocating SMBASE's for all processors
+;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+;
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;;
+
+SSM_SMBAS EQU 0fef8h
+SSM_IEDBAS EQU 0ff04h
+
+EXTERNDEF SmmInitHandler:PROC
+EXTERNDEF gSmmCr0:DWORD
+EXTERNDEF gSmmCr3:DWORD
+EXTERNDEF gSmmCr4:DWORD
+EXTERNDEF gSmmJmpAddr:QWORD
+EXTERNDEF gcSmmInitTemplate:BYTE
+EXTERNDEF gcSmmInitSize:WORD
+EXTERNDEF mRebasedFlag:PTR BYTE
+EXTERNDEF mSmmRelocationOriginalAddress:QWORD
+EXTERNDEF mRebasedFlagAddr32:DWORD
+EXTERNDEF mSmmRelocationOriginalAddressPtr32:DWORD
+EXTERNDEF gSmmInitStack:QWORD
+
+ .data
+
+NullSeg DQ 0 ; reserved by architecture
+CodeSeg64 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0afh ; LimitHigh
+ DB 0 ; BaseHigh
+GDT_SIZE = $ - offset NullSeg
+
+ .code
+
+GdtDesc LABEL FWORD
+ DW GDT_SIZE
+ DQ offset NullSeg
+
+SmmStartup PROC
+ DB 66h, 0b8h ; mov eax, imm32
+gSmmCr3 DD ?
+ mov cr3, rax
+ DB 66h, 2eh
+ lgdt fword ptr [ebp + (offset GdtDesc - SmmStartup)]
+ DB 66h, 0b8h ; mov eax, imm32
+gSmmCr4 DD ?
+ mov cr4, rax
+ DB 66h
+ mov ecx, 0c0000080h ; IA32_EFER MSR
+ rdmsr
+ or ah, 1 ; set LME bit
+ wrmsr
+ DB 66h, 0b8h ; mov eax, imm32
+gSmmCr0 DD ?
+ mov cr0, rax ; enable protected mode & paging
+ DB 66h, 0eah ; far jmp to long mode
+gSmmJmpAddr DQ @LongMode
+@LongMode: ; long-mode starts here
+ DB 48h, 0bch ; mov rsp, imm64
+gSmmInitStack DQ ?
+ and sp, 0fff0h ; make sure RSP is 16-byte aligned
+ ;
+ ; According to the X64 calling convention, XMM0~5 are volatile; we need to
+ ; save them before calling the C function.
+ ;
+ sub rsp, 60h
+ movdqa [rsp], xmm0
+ movdqa [rsp + 10h], xmm1
+ movdqa [rsp + 20h], xmm2
+ movdqa [rsp + 30h], xmm3
+ movdqa [rsp + 40h], xmm4
+ movdqa [rsp + 50h], xmm5
+
+ add rsp, -20h
+ call SmmInitHandler
+ add rsp, 20h
+
+ ;
+ ; Restore XMM0~5 after calling the C function.
+ ;
+ movdqa xmm0, [rsp]
+ movdqa xmm1, [rsp + 10h]
+ movdqa xmm2, [rsp + 20h]
+ movdqa xmm3, [rsp + 30h]
+ movdqa xmm4, [rsp + 40h]
+ movdqa xmm5, [rsp + 50h]
+
+ rsm
+SmmStartup ENDP
+
+gcSmmInitTemplate LABEL BYTE
+
+_SmmInitTemplate PROC
+ DB 66h, 2eh, 8bh, 2eh ; mov ebp, cs:[@F]
+ DW @L1 - _SmmInitTemplate + 8000h
+ DB 66h, 81h, 0edh, 00h, 00h, 03h, 00 ; sub ebp, 30000h
+ jmp bp ; jmp ebp actually
+@L1:
+ DQ SmmStartup
+_SmmInitTemplate ENDP
+
+gcSmmInitSize DW $ - gcSmmInitTemplate
+
+SmmRelocationSemaphoreComplete PROC
+ push rax
+ mov rax, mRebasedFlag
+ mov byte ptr [rax], 1
+ pop rax
+ jmp [mSmmRelocationOriginalAddress]
+SmmRelocationSemaphoreComplete ENDP
+
+;
+; Semaphore code running in 32-bit mode
+;
+SmmRelocationSemaphoreComplete32 PROC
+ ;
+ ; mov byte ptr [], 1
+ ;
+ db 0c6h, 05h
+mRebasedFlagAddr32 dd 0
+ db 1
+ ;
+ ; jmp dword ptr []
+ ;
+ db 0ffh, 25h
+mSmmRelocationOriginalAddressPtr32 dd 0
+SmmRelocationSemaphoreComplete32 ENDP
+
+ END
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
new file mode 100644
index 0000000000..535ee7f231
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -0,0 +1,302 @@
+/** @file
+ X64 processor specific functions to enable SMM profile.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+//
+// Current page index.
+//
+UINTN mPFPageIndex;
+
+//
+// Pool for dynamically creating page table in page fault handler.
+//
+UINT64 mPFPageBuffer;
+
+//
+// Store the uplink information for each page being used.
+//
+UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+
+ //
+ // Generate PAE page table for the first 4GB memory space
+ //
+ Pages = Gen4GPageTable (1);
+
+ //
+ // Fill Page-Table-Level4 (PML4) entry
+ //
+ PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (1));
+ *PTEntry = Pages + IA32_PG_P;
+ ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
+
+ //
+ // Return the address of PML4 (to set CR3)
+ //
+ mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;
+
+ return ;
+}
+
+/**
+  Allocate pages used to create 4KB pages from 2MB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+ VOID *Address;
+
+ //
+ // Pre-Allocate memory for page fault handler
+ //
+  Address = AllocatePages (MAX_PF_PAGE_COUNT);
+  ASSERT (Address != NULL);
+
+ mPFPageBuffer = (UINT64)(UINTN) Address;
+ mPFPageIndex = 0;
+ ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
+ ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
+
+ return;
+}
+
+/**
+  Allocate one page used to split a 2MB page into 4KB pages.
+
+  @param Uplink The address of the Page-Directory entry that will point to the new page.
+
+**/
+VOID
+AcquirePage (
+ UINT64 *Uplink
+ )
+{
+ UINT64 Address;
+
+ //
+ // Get the buffer
+ //
+ Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
+ ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);
+
+ //
+ // Cut the previous uplink if it exists and wasn't overwritten
+ //
+ if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & PHYSICAL_ADDRESS_MASK) == Address)) {
+ *mPFPageUplink[mPFPageIndex] = 0;
+ }
+
+ //
+ // Link & Record the current uplink
+ //
+ *Uplink = Address | IA32_PG_P | IA32_PG_RW;
+ mPFPageUplink[mPFPageIndex] = Uplink;
+
+ mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
+}
+
+/**
+  Update the page table to map the memory correctly in order to make the instruction
+  which caused the page fault execute successfully. It also saves the original page
+  table entry so that it can be restored in the single-step exception handler.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused the page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of the exception.
+  @param IsValidPFAddress The flag indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+ UINTN PTIndex;
+ UINT64 Address;
+ BOOLEAN Nx;
+ BOOLEAN Existed;
+ UINTN Index;
+ UINTN PFIndex;
+
+ ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
+
+ //
+  // The page fault address is above 4GB.
+ //
+
+ //
+  // Check whether the page fault address already exists in the page table.
+  // If it exists but a page fault is still generated, there are two possible reasons:
+  // 1. the present flag is cleared; 2. instruction fetch in a protected memory range.
+ //
+ Existed = FALSE;
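+  //
+  // Walk the 4-level page table: bits 47:39 of the address index the PML4,
+  // bits 38:30 the PDPT, bits 29:21 the PD, and bits 20:12 the PT.
+  //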
+ PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ // PML4E
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ // PDPTE
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+ // PD
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ //
+ // 2MB page
+ //
+ Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
+ Existed = TRUE;
+ }
+ } else {
+ //
+ // 4KB page
+ //
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      if (PageTable != NULL) {
+ //
+        // When a valid entry already maps the 4KB page, there is no need to create a new entry for the 2MB page.
+ //
+ PTIndex = BitFieldRead64 (PFAddress, 12, 20);
+ Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+ Existed = TRUE;
+ }
+ }
+ }
+ }
+ }
+
+ //
+  // If the page entry does not exist in the page table at all, create a new entry.
+ //
+ if (!Existed) {
+
+ if (IsAddressValid (PFAddress, &Nx)) {
+ //
+      // If a page fault address above 4GB is in a protected range but still causes a page fault exception,
+      // create a page entry for it below, marking the entry present/rw and execution-disabled;
+      // such an access is not saved into the SMM profile data.
+ //
+ *IsValidPFAddress = TRUE;
+ }
+
+ //
+ // Create one entry in page table for page fault address.
+ //
+ SmiDefaultPFHandler ();
+ //
+ // Find the page table entry created just now.
+ //
+ PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+ PFAddress = AsmReadCr2 ();
+ // PML4E
+ PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ // PDPTE
+ PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ // PD
+ PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+ Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
+ //
+    // Check whether the 2MB-page entry needs to be split into 4KB-page entries.
+ //
+ if (IsAddressSplit (Address)) {
+ AcquirePage (&PageTable[PTIndex]);
+
+ // PTE
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ for (Index = 0; Index < 512; Index++) {
+ PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
+ if (!IsAddressValid (Address, &Nx)) {
+ PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+ }
+ if (Nx && mXdSupported) {
+ PageTable[Index] = PageTable[Index] | IA32_PG_NX;
+ }
+ if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+ PTIndex = Index;
+ }
+ Address += SIZE_4KB;
+ } // end for PT
+ } else {
+ //
+ // Update 2MB page entry.
+ //
+ if (!IsAddressValid (Address, &Nx)) {
+ //
+ // Patch to remove present flag and rw flag.
+ //
+ PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+ }
+ //
+ // Set XD bit to 1
+ //
+ if (Nx && mXdSupported) {
+ PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
+ }
+ }
+ }
+
+ //
+  // Record old entries with non-present status.
+  // Old entries include the page the faulting instruction is on and the pages that instruction accesses.
+  //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+  // Set the present and rw flags (and, for instruction fetches, clear the XD flag below) so the faulting access can proceed.
+ //
+ PageTable[PTIndex] |= IA32_PG_RW | IA32_PG_P;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ //
+ // If page fault is caused by instruction fetch, clear XD bit in the entry.
+ //
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+
+ return;
+}
diff --git a/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.h b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
new file mode 100644
index 0000000000..ea43d143a6
--- /dev/null
+++ b/BraswellPlatformPkg/Common/Silicon/IntelSiliconBasic/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
@@ -0,0 +1,106 @@
+/** @file
+ X64 processor specific header file.
+
+ Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT64 BTSBufferBase;
+ UINT64 BTSIndex;
+ UINT64 BTSAbsoluteMaximum;
+ UINT64 BTSInterruptThreshold;
+ UINT64 PEBSBufferBase;
+ UINT64 PEBSIndex;
+ UINT64 PEBSAbsoluteMaximum;
+ UINT64 PEBSInterruptThreshold;
+ UINT64 PEBSCounterReset[2];
+ UINT64 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT64 LastBranchFrom;
+ UINT64 LastBranchTo;
+ UINT64 Rsvd0 : 4;
+ UINT64 BranchPredicted : 1;
+ UINT64 Rsvd1 : 59;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT64 Rflags;
+ UINT64 LinearIP;
+ UINT64 Rax;
+ UINT64 Rbx;
+ UINT64 Rcx;
+ UINT64 Rdx;
+ UINT64 Rsi;
+ UINT64 Rdi;
+ UINT64 Rbp;
+ UINT64 Rsp;
+ UINT64 R8;
+ UINT64 R9;
+ UINT64 R10;
+ UINT64 R11;
+ UINT64 R12;
+ UINT64 R13;
+ UINT64 R14;
+ UINT64 R15;
+} PEBS_RECORD;
+
+#pragma pack ()
+
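+//
+// Mask of the physical page frame address field (bits 51:12) of a page table entry.
+//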
+#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
+
+/**
+  Update the page table to map the memory correctly in order to make the instruction
+  which caused the page fault execute successfully. It also saves the original page
+  table entry so that it can be restored in the single-step exception handler.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused the page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of the exception.
+  @param IsValidPFAddress The flag indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+  Allocate pages used to create 4KB pages from 2MB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_