author     Jeff Fan <jeff.fan@intel.com>    2016-06-27 15:41:50 +0800
committer  Hao Wu <hao.a.wu@intel.com>      2016-07-21 15:11:33 +0800
commit     f6879b66b55eb454e4ec9eb26907a2a627cad64b (patch)
tree       1df4fe7810e341a690976a079f8b54494824e013
parent     df6bfe5beaa451a48fdcc86b13e55afe950b7b10 (diff)
UefiCpuPkg/PiSmmCpuDxeSmm: Remove duplicate aligned buffer on S3 path
InitializeMpSyncData() invokes InitializeSmmCpuSemaphores() to allocate an
aligned buffer for all locks and semaphores. However, InitializeMpSyncData()
is invoked again on the S3 resume path to reset mSmmMpSyncData, which causes
an additional aligned buffer to be allocated.

This update moves InitializeSmmCpuSemaphores() into InitializeMpServiceData(),
which is only invoked on normal boot, and updates InitializeMpSyncData() to
reset the locks/semaphores in mSmmMpSyncData.

Cc: Michael Kinney <michael.d.kinney@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Feng Tian <feng.tian@intel.com>
Reviewed-by: Michael Kinney <michael.d.kinney@intel.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit 8b9311b79557311e137d0ffdc7934fea3966b0d7)
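For context, the following is a minimal, hypothetical C sketch of the call
ordering this patch establishes. The stub bodies, the malloc() stand-in for
the aligned page allocation, and the main() driver are illustrative
assumptions only and are not edk2 code; only the function names mirror the
patch.

    /* Hypothetical sketch of the post-patch call order; not edk2 code. */
    #include <stdio.h>
    #include <stdlib.h>

    static void *SemaphoreBlock;    /* stands in for the aligned lock/semaphore buffer */

    /* Allocates the buffer once; after the patch this is called only from
       InitializeMpServiceData(), i.e. only on normal boot. */
    static void InitializeSmmCpuSemaphores (void)
    {
      SemaphoreBlock = malloc (4096);   /* stand-in for the real page allocation */
      printf ("allocated semaphore block at %p\n", SemaphoreBlock);
    }

    /* Resets the locks/semaphores referenced by the sync data; after the patch
       it no longer allocates anything, so calling it again on S3 resume is safe. */
    static void InitializeMpSyncData (void)
    {
      printf ("reset sync data using block at %p\n", SemaphoreBlock);
    }

    /* Normal-boot path: allocate once, then initialize the sync data. */
    static void InitializeMpServiceData (void)
    {
      InitializeSmmCpuSemaphores ();
      InitializeMpSyncData ();
    }

    int main (void)
    {
      InitializeMpServiceData ();   /* normal boot: one allocation */
      InitializeMpSyncData ();      /* S3 resume: reset only, no second buffer */
      free (SemaphoreBlock);
      return 0;
    }

Before the patch, the allocation was reached from InitializeMpSyncData() on
both paths, so the S3 resume call left an extra aligned buffer behind.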
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c  41
1 file changed, 25 insertions(+), 16 deletions(-)
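The loop that the patch adds to InitializeMpSyncData() (second hunk in the
diff below) derives each CPU's Busy/Run/Present pointers by stepping through
the single pre-allocated block in mSemaphoreSize strides. The stand-alone C
sketch below shows that pointer arithmetic with assumed values; the 64-byte
stride, the processor count, and the plain uint32_t slot type are
illustrative assumptions, not the edk2 definitions.

    /* Hypothetical illustration of carving per-CPU semaphore slots out of one block. */
    #include <stdint.h>
    #include <stdio.h>

    #define PROCESSOR_COUNT  4
    #define SEMAPHORE_SIZE   64   /* assumed stride; the real code uses mSemaphoreSize */

    int main (void)
    {
      /* One contiguous block holds every per-CPU slot, mirroring the single
         aligned buffer allocated by InitializeSmmCpuSemaphores(). */
      static uint8_t Block[PROCESSOR_COUNT * SEMAPHORE_SIZE];
      uint32_t      *Busy[PROCESSOR_COUNT];
      uintptr_t      CpuIndex;

      /* Same arithmetic as the added loop: base address plus stride * CpuIndex,
         so each CPU's lock lands in its own SEMAPHORE_SIZE-byte slot. */
      for (CpuIndex = 0; CpuIndex < PROCESSOR_COUNT; CpuIndex++) {
        Busy[CpuIndex] = (uint32_t *)((uintptr_t)Block + SEMAPHORE_SIZE * CpuIndex);
        printf ("CPU %lu Busy slot at offset %lu\n",
                (unsigned long)CpuIndex,
                (unsigned long)((uintptr_t)Busy[CpuIndex] - (uintptr_t)Block));
      }
      return 0;
    }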
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 3ea4b6b91d..8970c789a6 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1200,7 +1200,6 @@ InitializeSmmCpuSemaphores (
VOID
)
{
- UINTN CpuIndex;
UINTN ProcessorCount;
UINTN TotalSize;
UINTN GlobalSemaphoresSize;
@@ -1235,7 +1234,6 @@ InitializeSmmCpuSemaphores (
SemaphoreAddr += SemaphoreSize;
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
= (SPIN_LOCK *)SemaphoreAddr;
-
SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
SemaphoreAddr += ProcessorCount * SemaphoreSize;
@@ -1249,21 +1247,9 @@ InitializeSmmCpuSemaphores (
((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
- mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
- mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
- mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
- for (CpuIndex = 0; CpuIndex < ProcessorCount; CpuIndex ++) {
- mSmmMpSyncData->CpuData[CpuIndex].Busy =
- (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + SemaphoreSize * CpuIndex);
- mSmmMpSyncData->CpuData[CpuIndex].Run =
- (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + SemaphoreSize * CpuIndex);
- mSmmMpSyncData->CpuData[CpuIndex].Present =
- (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + SemaphoreSize * CpuIndex);
- }
-
mSemaphoreSize = SemaphoreSize;
}
@@ -1277,8 +1263,10 @@ InitializeMpSyncData (
VOID
)
{
+ UINTN CpuIndex;
+
if (mSmmMpSyncData != NULL) {
- ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
+ mSmmMpSyncData->SwitchBsp = FALSE;
mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
@@ -1289,7 +1277,23 @@ InitializeMpSyncData (
}
mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
- InitializeSmmCpuSemaphores ();
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
+ ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
+ mSmmMpSyncData->AllCpusInSync != NULL);
+ *mSmmMpSyncData->Counter = 0;
+ *mSmmMpSyncData->InsideSmm = FALSE;
+ *mSmmMpSyncData->AllCpusInSync = FALSE;
+
+ for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
+ mSmmMpSyncData->CpuData[CpuIndex].Busy =
+ (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
+ mSmmMpSyncData->CpuData[CpuIndex].Run =
+ (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
+ mSmmMpSyncData->CpuData[CpuIndex].Present =
+ (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
+ }
}
}
@@ -1314,6 +1318,11 @@ InitializeMpServiceData (
UINTN GdtTableStepSize;
//
+ // Allocate memory for all locks and semaphores
+ //
+ InitializeSmmCpuSemaphores ();
+
+ //
// Initialize mSmmMpSyncData
//
mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +