Diffstat (limited to 'UefiCpuPkg')
 UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c | 96
 UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c         | 83
 UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h    | 15
 UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf  |  2
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S      |  8
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm    |  8
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c  | 70
 7 files changed, 186 insertions(+), 96 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
new file mode 100644
index 0000000000..545b534f27
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
@@ -0,0 +1,96 @@
+/** @file
+ SMM CPU misc functions specific to the Ia32 arch.
+
+Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ )
+{
+ UINTN Index;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // For IA32 SMM, if the SMM Stack Guard feature is enabled, we use 2 TSSs.
+ // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+ // on each SMI entry.
+ //
+
+ //
+ // Enlarge GDT to contain 2 TSS descriptors
+ //
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
+
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ ASSERT (GdtTssTables != NULL);
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
+ //
+ // Fixup TSS descriptors
+ //
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+
+ TssBase += TSS_SIZE;
+ GdtDescriptor++;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+ //
+ // Fixup TSS segments
+ //
+ // ESP as known good stack
+ //
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
+ }
+ } else {
+ //
+ // Just use the original table; allocate pages and copy the GDTs here to make sure they are covered by page memory.
+ //
+ GdtTssTableSize = gcSmiGdtr.Limit + 1;
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ ASSERT (GdtTssTables != NULL);
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
+ }
+ }
+
+ *GdtStepSize = GdtTableStepSize;
+ return GdtTssTables;
+}
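
The fixup loop above scatters each 32-bit TSS base across the BaseLow/BaseMid/BaseHigh fields of its GDT descriptor. The following standalone C sketch is not part of the patch; the struct layout and helper name are simplified stand-ins for the EDK2 IA32_SEGMENT_DESCRIPTOR bit fields, shown only to make the split visible in isolation:

  /*
   * Hypothetical standalone sketch: split a 32-bit TSS base across the three
   * non-contiguous base fields of a GDT descriptor, as InitGdt() does above.
   */
  #include <stdint.h>
  #include <stdio.h>

  typedef struct {
    uint16_t LimitLow;
    uint16_t BaseLow;          /* base bits  0..15 */
    uint8_t  BaseMid;          /* base bits 16..23 */
    uint8_t  Access;
    uint8_t  LimitHighFlags;
    uint8_t  BaseHigh;         /* base bits 24..31 */
  } SegmentDescriptor;

  static void SetDescriptorBase (SegmentDescriptor *Desc, uint32_t Base)
  {
    Desc->BaseLow  = (uint16_t)Base;
    Desc->BaseMid  = (uint8_t)(Base >> 16);
    Desc->BaseHigh = (uint8_t)(Base >> 24);
  }

  int main (void)
  {
    SegmentDescriptor Tss = {0};
    SetDescriptorBase (&Tss, 0x00ABCDEF);
    printf ("low=%04x mid=%02x high=%02x\n", Tss.BaseLow, Tss.BaseMid, Tss.BaseHigh);
    return 0;
  }

Running it prints low=cdef mid=ab high=00, which is exactly how the three descriptor fields recombine into the original base address.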
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 730c32df0a..80378a3faf 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1163,10 +1163,7 @@ InitializeMpServiceData (
UINTN Index;
MTRR_SETTINGS *Mtrr;
PROCESSOR_SMM_DESCRIPTOR *Psd;
- UINTN GdtTssTableSize;
UINT8 *GdtTssTables;
- IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
- UINTN TssBase;
UINTN GdtTableStepSize;
//
@@ -1182,71 +1179,7 @@ InitializeMpServiceData (
//
Cr3 = SmmInitPageTable ();
- GdtTssTables = NULL;
- GdtTssTableSize = 0;
- GdtTableStepSize = 0;
- //
- // For X64 SMM, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention
- // on each SMI entry.
- //
- if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {
- GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
- ASSERT (GdtTssTables != NULL);
- GdtTableStepSize = GdtTssTableSize;
-
- for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
- CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
- if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
- //
- // Setup top of known good stack as IST1 for each processor.
- //
- *(UINTN *)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
- }
- }
- } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
-
- //
- // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.
- // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention
- // on each SMI entry.
- //
-
- //
- // Enlarge GDT to contain 2 TSS descriptors
- //
- gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
-
- GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
- ASSERT (GdtTssTables != NULL);
- GdtTableStepSize = GdtTssTableSize;
-
- for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
- CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
- //
- // Fixup TSS descriptors
- //
- TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
- GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
- GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
- GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
- GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
-
- TssBase += TSS_SIZE;
- GdtDescriptor++;
- GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
- GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
- GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
- //
- // Fixup TSS segments
- //
- // ESP as known good stack
- //
- *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
- *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
- }
- }
+ GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
//
// Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
@@ -1254,18 +1187,8 @@ InitializeMpServiceData (
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
CopyMem (Psd, &gcPsd, sizeof (gcPsd));
- if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED (EFI_IMAGE_MACHINE_X64)) {
- //
- // For X64 SMM, set GDT to the copy allocated above.
- //
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
- } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
- //
- // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above.
- //
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
- Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
- }
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
+ Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;
//
// Install SMI handler
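
After the refactor, InitGdt() hands back the processor-0 GDT base plus a per-CPU stride, and the loop above points each PROCESSOR_SMM_DESCRIPTOR at its own copy. Below is a minimal sketch of that addressing; the base, stride, and CPU count are invented placeholders, not the module's real values:

  /*
   * Hypothetical sketch: per-CPU GDT pointers derived from the InitGdt()
   * return value and step size, mirroring the Psd->SmmGdtPtr assignment above.
   */
  #include <stdint.h>
  #include <stdio.h>

  int main (void)
  {
    uintptr_t GdtBase      = 0x7F000000;  /* assumed return of InitGdt()             */
    uintptr_t GdtStepSize  = 0x1000;      /* assumed per-CPU stride (8-byte aligned) */
    unsigned  NumberOfCpus = 4;           /* assumed CPU count                        */

    for (unsigned Index = 0; Index < NumberOfCpus; Index++) {
      /* GdtBase for processor X is: GdtBase + (GdtStepSize * X) */
      uintptr_t SmmGdtPtr = GdtBase + GdtStepSize * Index;
      printf ("CPU %u: SmmGdtPtr = 0x%lx\n", Index, (unsigned long)SmmGdtPtr);
    }
    return 0;
  }

Each CPU therefore loads a private GDT/TSS pair, which is what removes the TSS load contention mentioned in the comments.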
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index cfbf2ca6da..106e67465c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -428,6 +428,21 @@ InitializeIDTSmmStackGuard (
  );
/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ );
+
+/**
Register the SMM Foundation entry point.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index a293a88e99..122318f02e 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -48,6 +48,7 @@ [Sources.Ia32]
Ia32/Semaphore.c
Ia32/PageTbl.c
+ Ia32/SmmFuncsArch.c
Ia32/SmmProfileArch.c
Ia32/SmmProfileArch.h
Ia32/SmmInit.asm | MSFT
@@ -68,6 +69,7 @@ [Sources.X64]
X64/Semaphore.c
X64/PageTbl.c
+ X64/SmmFuncsArch.c
X64/SmmProfileArch.c
X64/SmmProfileArch.h
X64/SmmInit.asm | MSFT
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S
index 831559357f..95e6dfa814 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S
@@ -128,14 +128,6 @@ ASM_PFX(gSmiCr3): .space 4
    sgdt (%rsp)
movl 2(%rsp), %eax # eax = GDT base
addl $8, %esp
- movl %eax, %edx
- addl $GDT_SIZE, %edx
- movb %dl, (TSS_SEGMENT + 2)(%rax)
- movb %dh, (TSS_SEGMENT + 3)(%rax)
- .byte 0xc1, 0xea, 0x10 # shr edx, 16
- movb %dl, (TSS_SEGMENT + 4)(%rax)
- movb %dh, (TSS_SEGMENT + 7)(%rax)
- movl %eax, %edx
movb $0x89, %dl
movb %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag
movl $TSS_SEGMENT, %eax
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm
index c556bf5f5b..4d53db5b00 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm
@@ -124,14 +124,6 @@ gSmiCr3 DD ?
    sgdt fword ptr [rsp]
mov eax, [rsp + 2] ; eax = GDT base
add esp, 8
- mov edx, eax
- add edx, GDT_SIZE
- mov [rax + TSS_SEGMENT + 2], dl
- mov [rax + TSS_SEGMENT + 3], dh
- DB 0c1h, 0eah, 10h ; shr edx, 16
- mov [rax + TSS_SEGMENT + 4], dl
- mov [rax + TSS_SEGMENT + 7], dh
- mov edx, eax
mov dl, 89h
mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
mov eax, TSS_SEGMENT
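
With the base fixup moved into C, the only descriptor patching left in both SMI entry stubs (the GAS and MASM versions above) is rewriting the TSS descriptor's access byte to 0x89, i.e. present, DPL 0, type 1001b (available TSS), so a descriptor that an earlier LTR marked busy (type 1011b) can be loaded again. The C rendering below covers just that step; TSS_SEGMENT and the fake GDT buffer are placeholders, not the module's real definitions:

  /*
   * Hypothetical sketch: clear the TSS descriptor busy flag before reloading
   * TR, as the remaining assembly above does with "mov [rax + TSS_SEGMENT + 5], dl".
   */
  #include <stdint.h>

  #define TSS_SEGMENT 0x40   /* assumed byte offset of the TSS descriptor in the GDT */

  static void ClearTssBusyFlag (uint8_t *GdtBase)
  {
    /* byte 5 of a segment descriptor is the access byte; 0x89 = P=1, type=1001b */
    GdtBase[TSS_SEGMENT + 5] = 0x89;
  }

  int main (void)
  {
    uint8_t FakeGdt[TSS_SEGMENT + 8] = {0};
    FakeGdt[TSS_SEGMENT + 5] = 0x8B;   /* pretend a previous LTR left it busy */
    ClearTssBusyFlag (FakeGdt);
    return FakeGdt[TSS_SEGMENT + 5] == 0x89 ? 0 : 1;
  }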
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
new file mode 100644
index 0000000000..b53aa45c21
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -0,0 +1,70 @@
+/** @file
+ SMM CPU misc functions specific to the x64 arch.
+
+Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ )
+{
+ UINTN Index;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+
+ //
+ // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+ // on each SMI entry.
+ //
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ ASSERT (GdtTssTables != NULL);
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
+
+ //
+ // Fixup TSS descriptors
+ //
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Setup top of known good stack as IST1 for each processor.
+ //
+ *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
+ }
+ }
+
+ *GdtStepSize = GdtTableStepSize;
+ return GdtTssTables;
+}
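
When stack guard is enabled, the loop above programs IST1 for every CPU with the top of its known good stack, computed as mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize. The sketch below reproduces only that arithmetic; the base and stack size are invented placeholders, since the real module variables are set up elsewhere in PiSmmCpuDxeSmm:

  /*
   * Hypothetical sketch: per-CPU IST1 values computed the same way as in the
   * stack-guard branch of InitGdt() above. All values are placeholders.
   */
  #include <stdint.h>
  #include <stdio.h>

  #define EFI_PAGE_SIZE 0x1000u          /* UEFI page size, 4 KiB */

  int main (void)
  {
    uintptr_t mSmmStackArrayBase = 0x7E000000;  /* assumed base of the SMM stack array */
    uintptr_t mSmmStackSize      = 0x2000;      /* assumed per-CPU stack size           */
    unsigned  NumberOfCpus       = 2;           /* assumed CPU count                    */

    for (unsigned Index = 0; Index < NumberOfCpus; Index++) {
      /* top of the known good stack used as IST1 for this CPU */
      uintptr_t Ist1 = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
      printf ("CPU %u: IST1 = 0x%lx\n", Index, (unsigned long)Ist1);
    }
    return 0;
  }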