path: root/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
author    Yao, Jiewen <jiewen.yao@intel.com>  2015-11-30 19:57:40 +0000
committer Hao Wu <hao.a.wu@intel.com>  2016-02-25 09:13:23 +0800
commit    cd5e29959f602cddd8e4b42b207f2101b6e92e89 (patch)
tree      090c2ab6c4b3438828354d518dc863a646c599c0 /UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
parent    b2c7f09d779b252b6cd4fa48cd42aea203522bf4 (diff)
download  edk2-platforms-cd5e29959f602cddd8e4b42b207f2101b6e92e89.tar.xz
UefiCpuPkg/PiSmmCpu: Always set RW+P bit for page table by default
So that we can use write-protection for code later.

This is a REPOST. It includes the bug fix from "Paolo Bonzini" <pbonzini@redhat.com>:
Title: fix generation of 32-bit PAE page tables

"Bits 1 and 2 are reserved in 32-bit PAE Page Directory Pointer Table Entries
(PDPTEs); see Table 4-8 in the SDM. With VMX extended page tables, the
processor notices and fails the VM entry as soon as CR0.PG is set to 1."

Thanks to "Laszlo Ersek" <lersek@redhat.com> for validating the fix.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: "Yao, Jiewen" <jiewen.yao@intel.com>
Signed-off-by: "Paolo Bonzini" <pbonzini@redhat.com>
Reviewed-by: Michael Kinney <michael.d.kinney@intel.com>
Tested-by: Laszlo Ersek <lersek@redhat.com>
Cc: "Fan, Jeff" <jeff.fan@intel.com>
Cc: "Kinney, Michael D" <michael.d.kinney@intel.com>
Cc: "Laszlo Ersek" <lersek@redhat.com>
Cc: "Paolo Bonzini" <pbonzini@redhat.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@19067 6f19259b-4bc3-4df7-8a09-765794883524
(cherry picked from commit 881520ea6778953c57d975ca2a9cf3f2114f99c4)
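The reserved-bit constraint that Paolo Bonzini's fix addresses is easy to trip over, so a minimal standalone sketch may help. It re-declares the paging bit names locally so it compiles on its own; the real definitions live in the PiSmmCpuDxeSmm headers, the actual PAGE_ATTRIBUTE_BITS macro may set additional bits, and the helper names here are hypothetical, not the edk2 code itself.

/*
 * Standalone sketch (not the edk2 source): a 32-bit PAE PDPTE may only
 * carry the Present bit, while ordinary PDE/PTE entries get the combined
 * read/write + present mask so code pages can be write-protected later.
 */
#include <stdint.h>

#define IA32_PG_P            0x1ULL                     /* bit 0: present     */
#define IA32_PG_RW           0x2ULL                     /* bit 1: read/write  */
/* Assumed minimal expansion; the real macro may set more bits. */
#define PAGE_ATTRIBUTE_BITS  (IA32_PG_RW | IA32_PG_P)

/* Bits 1 and 2 of a PAE PDPTE are reserved (SDM Vol. 3, Table 4-8), so the
   entry gets the Present bit only. */
static uint64_t
MakePaePdpte (uint64_t PageDirPhysAddr)
{
  return PageDirPhysAddr | IA32_PG_P;                   /* present only */
}

/* Ordinary page-directory / page-table entries take the full mask. */
static uint64_t
MakePde (uint64_t PageTablePhysAddr)
{
  return PageTablePhysAddr | PAGE_ATTRIBUTE_BITS;
}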
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c  |  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
index c4ec12debb..79e23ef647 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -45,13 +45,13 @@ InitSmmS3Cr3 (
//
// Generate PAE page table for the first 4GB memory space
//
- Pages = Gen4GPageTable (1);
+ Pages = Gen4GPageTable (1, FALSE);
//
// Fill Page-Table-Level4 (PML4) entry
//
PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (1));
- *PTEntry = Pages + IA32_PG_P;
+ *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
//
@@ -117,7 +117,7 @@ AcquirePage (
//
// Link & Record the current uplink
//
- *Uplink = Address | IA32_PG_P | IA32_PG_RW;
+ *Uplink = Address | PAGE_ATTRIBUTE_BITS;
mPFPageUplink[mPFPageIndex] = Uplink;
mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
@@ -242,9 +242,9 @@ RestorePageTableAbove4G (
// PTE
PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
for (Index = 0; Index < 512; Index++) {
- PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
+ PageTable[Index] = Address | PAGE_ATTRIBUTE_BITS;
if (!IsAddressValid (Address, &Nx)) {
- PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+ PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
}
if (Nx && mXdSupported) {
PageTable[Index] = PageTable[Index] | IA32_PG_NX;
@@ -262,7 +262,7 @@ RestorePageTableAbove4G (
//
// Patch to remove present flag and rw flag.
//
- PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+ PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
}
//
// Set XD bit to 1
@@ -289,7 +289,7 @@ RestorePageTableAbove4G (
//
// Add present flag or clear XD flag to make page fault handler succeed.
//
- PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
+ PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
if ((ErrorCode & IA32_PF_EC_ID) != 0) {
//
// If page fault is caused by instruction fetch, clear XD bit in the entry.
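The page-fault fix-up that the last two hunks touch follows one small pattern: mark the faulting entry present and writable, and additionally clear the XD bit when the fault came from an instruction fetch. A minimal standalone sketch of that pattern follows; the bit macros are re-declared locally with their architectural values, FixupFaultingEntry is a hypothetical helper name, and this is not the edk2 function itself.

/*
 * Sketch of the fix-up pattern seen in RestorePageTableAbove4G() above,
 * re-declared locally so it compiles on its own (values follow the Intel SDM;
 * the edk2 definitions live in the PiSmmCpuDxeSmm headers).
 */
#include <stdint.h>

#define IA32_PG_P            0x1ULL
#define IA32_PG_RW           0x2ULL
#define PAGE_ATTRIBUTE_BITS  (IA32_PG_RW | IA32_PG_P)   /* assumed minimal expansion */
#define IA32_PG_NX           (1ULL << 63)               /* execute-disable bit        */
#define IA32_PF_EC_ID        (1ULL << 4)                /* #PF error code: instruction fetch */

/* Make the faulting entry usable again: set present + read/write, and if the
   fault was an instruction fetch, also clear the XD bit so the fetch can
   complete. */
static void
FixupFaultingEntry (uint64_t *PageTableEntry, uint64_t ErrorCode)
{
  *PageTableEntry |= PAGE_ATTRIBUTE_BITS;
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    *PageTableEntry &= ~IA32_PG_NX;
  }
}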