author     andrewfish <andrewfish@6f19259b-4bc3-4df7-8a09-765794883524>   2011-02-02 22:35:30 +0000
committer  andrewfish <andrewfish@6f19259b-4bc3-4df7-8a09-765794883524>   2011-02-02 22:35:30 +0000
commit     1bfda055dfbc52678655ab2ded721f9f7c0cd496 (patch)
tree       fbfa3654ec28d060955ff37e9e9365ad37179013 /ArmPkg/Drivers/CpuDxe
parent     7373d15a98fb571bf56688676c8ba950e6f62b8d (diff)
Sync up ArmPkg with patch from mailing list. Changed name of BdsLib.h to BdsUnixLib.h and fixed a lot of issues with Xcode building.
git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@11293 6f19259b-4bc3-4df7-8a09-765794883524
Diffstat (limited to 'ArmPkg/Drivers/CpuDxe')
-rw-r--r--  ArmPkg/Drivers/CpuDxe/CpuDxe.inf                     3
-rw-r--r--  ArmPkg/Drivers/CpuDxe/Exception.c                  104
-rw-r--r--  ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.S      29
-rw-r--r--  ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.asm    10
-rw-r--r--  ArmPkg/Drivers/CpuDxe/ExceptionSupport.S            10
-rw-r--r--  ArmPkg/Drivers/CpuDxe/Mmu.c                        302
6 files changed, 176 insertions(+), 282 deletions(-)
diff --git a/ArmPkg/Drivers/CpuDxe/CpuDxe.inf b/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
index 2fc5fda691..25c0b880b2 100644
--- a/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
+++ b/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
@@ -67,10 +67,13 @@
gEfiDebugImageInfoTableGuid
[Pcd.common]
+ gArmTokenSpaceGuid.PcdVFPEnabled
gArmTokenSpaceGuid.PcdCpuVectorBaseAddress
[FeaturePcd.common]
gArmTokenSpaceGuid.PcdCpuDxeProduceDebugSupport
+ gArmTokenSpaceGuid.PcdRelocateVectorTable
+ gArmTokenSpaceGuid.PcdEfiUncachedMemoryToStronglyOrdered
[depex]
gHardwareInterruptProtocolGuid
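
The CpuDxe.inf change declares three PCDs that the rest of the patch consumes: gArmTokenSpaceGuid.PcdVFPEnabled plus the feature flags PcdRelocateVectorTable and PcdEfiUncachedMemoryToStronglyOrdered. A hedged sketch of how they are consumed (the helper name is hypothetical; the real call sites are in the Exception.c, ExceptionSupport.* and Mmu.c hunks below):

#include <Base.h>
#include <Library/PcdLib.h>

//
// Illustrative sketch only, not part of the patch; the helper name is
// hypothetical and the real call sites are in the hunks that follow.
//
STATIC
VOID
SketchShowNewPcdUsage (
  VOID
  )
{
  if (FeaturePcdGet (PcdRelocateVectorTable)) {
    // Exception.c relocates the vector table to PcdCpuVectorBaseAddress.
  }

  if (FeaturePcdGet (PcdEfiUncachedMemoryToStronglyOrdered)) {
    // Mmu.c maps EFI_MEMORY_UC to Strongly Ordered instead of Normal Non-Cacheable.
  }

  //
  // PcdVFPEnabled is not read at run time here; the exception-support
  // assembly tests it with FixedPcdGet32() under the C preprocessor to decide
  // whether to save and restore d0-d15 around the C exception handler.
  //
}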
diff --git a/ArmPkg/Drivers/CpuDxe/Exception.c b/ArmPkg/Drivers/CpuDxe/Exception.c
index e0aca46cd7..21a4c035a9 100644
--- a/ArmPkg/Drivers/CpuDxe/Exception.c
+++ b/ArmPkg/Drivers/CpuDxe/Exception.c
@@ -14,6 +14,8 @@
#include "CpuDxe.h"
+//FIXME: Will not compile on non-ARMv7 builds
+#include <Chipset/ArmV7.h>
VOID
ExceptionHandlersStart (
@@ -127,6 +129,7 @@ InitializeExceptions (
EFI_PHYSICAL_ADDRESS Base;
UINT32 *VectorBase;
+ Status = EFI_SUCCESS;
//
// Disable interrupts
//
@@ -140,54 +143,63 @@ InitializeExceptions (
FiqEnabled = ArmGetFiqState ();
ArmDisableFiq ();
- //
- // Copy an implementation of the ARM exception vectors to PcdCpuVectorBaseAddress.
- //
- Length = (UINTN)ExceptionHandlersEnd - (UINTN)ExceptionHandlersStart;
-
- //
- // Reserve space for the exception handlers
- //
- Base = (EFI_PHYSICAL_ADDRESS)PcdGet32 (PcdCpuVectorBaseAddress);
- VectorBase = (UINT32 *)(UINTN)Base;
- Status = gBS->AllocatePages (AllocateAddress, EfiBootServicesCode, EFI_SIZE_TO_PAGES (Length), &Base);
- // If the request was for memory that's not in the memory map (which is often the case for 0x00000000
- // on embedded systems, for example, we don't want to hang up. So we'll check here for a status of
- // EFI_NOT_FOUND, and continue in that case.
- if (EFI_ERROR(Status) && (Status != EFI_NOT_FOUND)) {
- ASSERT_EFI_ERROR (Status);
- }
-
- // Save existing vector table, in case debugger is already hooked in
- CopyMem ((VOID *)gDebuggerExceptionHandlers, (VOID *)VectorBase, sizeof (gDebuggerExceptionHandlers));
-
- // Copy our assembly code into the page that contains the exception vectors.
- CopyMem ((VOID *)VectorBase, (VOID *)ExceptionHandlersStart, Length);
-
- //
- // Patch in the common Assembly exception handler
- //
- Offset = (UINTN)CommonExceptionEntry - (UINTN)ExceptionHandlersStart;
- *(UINTN *) ((UINT8 *)(UINTN)PcdGet32 (PcdCpuVectorBaseAddress) + Offset) = (UINTN)AsmCommonExceptionEntry;
-
- //
- // Initialize the C entry points for interrupts
- //
- for (Index = 0; Index <= MAX_ARM_EXCEPTION; Index++) {
- if ((gDebuggerExceptionHandlers[Index] == 0) || (gDebuggerExceptionHandlers[Index] == (VOID *)(UINTN)0xEAFFFFFE)) {
- // Exception handler contains branch to vector location (jmp $) so no handler
- // NOTE: This code assumes vectors are ARM and not Thumb code
- Status = RegisterInterruptHandler (Index, NULL);
- ASSERT_EFI_ERROR (Status);
- } else {
- // If the debugger has alread hooked put its vector back
- VectorBase[Index] = (UINT32)(UINTN)gDebuggerExceptionHandlers[Index];
- }
+ if (FeaturePcdGet(PcdRelocateVectorTable) == TRUE) {
+ //
+ // Copy an implementation of the ARM exception vectors to PcdCpuVectorBaseAddress.
+ //
+ Length = (UINTN)ExceptionHandlersEnd - (UINTN)ExceptionHandlersStart;
+
+ //
+ // Reserve space for the exception handlers
+ //
+ Base = (EFI_PHYSICAL_ADDRESS)PcdGet32 (PcdCpuVectorBaseAddress);
+ VectorBase = (UINT32 *)(UINTN)Base;
+ Status = gBS->AllocatePages (AllocateAddress, EfiBootServicesCode, EFI_SIZE_TO_PAGES (Length), &Base);
+ // If the request was for memory that's not in the memory map (which is often the case for 0x00000000
+ // on embedded systems, for example, we don't want to hang up. So we'll check here for a status of
+ // EFI_NOT_FOUND, and continue in that case.
+ if (EFI_ERROR(Status) && (Status != EFI_NOT_FOUND)) {
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ // Save existing vector table, in case debugger is already hooked in
+ CopyMem ((VOID *)gDebuggerExceptionHandlers, (VOID *)VectorBase, sizeof (gDebuggerExceptionHandlers));
+
+ // Copy our assembly code into the page that contains the exception vectors.
+ CopyMem ((VOID *)VectorBase, (VOID *)ExceptionHandlersStart, Length);
+
+ //
+ // Patch in the common Assembly exception handler
+ //
+ Offset = (UINTN)CommonExceptionEntry - (UINTN)ExceptionHandlersStart;
+ *(UINTN *) ((UINT8 *)(UINTN)PcdGet32 (PcdCpuVectorBaseAddress) + Offset) = (UINTN)AsmCommonExceptionEntry;
+
+ //
+ // Initialize the C entry points for interrupts
+ //
+ for (Index = 0; Index <= MAX_ARM_EXCEPTION; Index++) {
+ if ((gDebuggerExceptionHandlers[Index] == 0) || (gDebuggerExceptionHandlers[Index] == (VOID *)(UINTN)0xEAFFFFFE)) {
+ // Exception handler contains branch to vector location (jmp $) so no handler
+ // NOTE: This code assumes vectors are ARM and not Thumb code
+ Status = RegisterInterruptHandler (Index, NULL);
+ ASSERT_EFI_ERROR (Status);
+ } else {
+ // If the debugger has alread hooked put its vector back
+ VectorBase[Index] = (UINT32)(UINTN)gDebuggerExceptionHandlers[Index];
+ }
+ }
+
+ // Flush Caches since we updated executable stuff
+ InvalidateInstructionCacheRange ((VOID *)PcdGet32(PcdCpuVectorBaseAddress), Length);
+
+ //Note: On ARM processor with the Security Extension, the Vector Table can be located anywhere in the memory.
+ // The Vector Base Address Register defines the location
+ ArmWriteVBar(PcdGet32(PcdCpuVectorBaseAddress));
+ } else {
+ // We do not copy the Exception Table at PcdGet32(PcdCpuVectorBaseAddress). We just set Vector Base Address to point into CpuDxe code.
+ ArmWriteVBar((UINT32)ExceptionHandlersStart);
}
- // Flush Caches since we updated executable stuff
- InvalidateInstructionCacheRange ((VOID *)PcdGet32(PcdCpuVectorBaseAddress), Length);
-
if (FiqEnabled) {
ArmEnableFiq ();
}
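
With this rework InitializeExceptions() only relocates the vector table when PcdRelocateVectorTable is set; otherwise it leaves the handlers where CpuDxe was loaded and points VBAR at them, which processors with the Security Extensions allow. A condensed sketch of the two paths, assuming the same include environment as the patched Exception.c above (the helper name is hypothetical; page allocation, debugger re-hooking, and per-vector handler registration are omitted):

#include "CpuDxe.h"
#include <Chipset/ArmV7.h>

//
// Hypothetical condensed helper, not in the patch: the two vector-table
// strategies selected by PcdRelocateVectorTable in InitializeExceptions().
//
STATIC
VOID
SketchSetupVectorTable (
  VOID
  )
{
  UINTN  Length;
  UINTN  Offset;

  if (FeaturePcdGet (PcdRelocateVectorTable)) {
    // Copy ExceptionHandlersStart..ExceptionHandlersEnd to the fixed address,
    // patch the CommonExceptionEntry slot to point at AsmCommonExceptionEntry,
    // flush the I-cache over the copy, then aim VBAR at it.
    Length = (UINTN)ExceptionHandlersEnd - (UINTN)ExceptionHandlersStart;
    CopyMem ((VOID *)(UINTN)PcdGet32 (PcdCpuVectorBaseAddress), (VOID *)ExceptionHandlersStart, Length);
    Offset = (UINTN)CommonExceptionEntry - (UINTN)ExceptionHandlersStart;
    *(UINTN *)((UINT8 *)(UINTN)PcdGet32 (PcdCpuVectorBaseAddress) + Offset) = (UINTN)AsmCommonExceptionEntry;
    InvalidateInstructionCacheRange ((VOID *)(UINTN)PcdGet32 (PcdCpuVectorBaseAddress), Length);
    ArmWriteVBar (PcdGet32 (PcdCpuVectorBaseAddress));
  } else {
    // With the Security Extensions the vector table can live anywhere, so
    // leave the handlers inside CpuDxe and just point VBAR at them.
    ArmWriteVBar ((UINT32)(UINTN)ExceptionHandlersStart);
  }
}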
diff --git a/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.S b/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.S
index 1c2bb62605..86d2a7135f 100644
--- a/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.S
+++ b/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.S
@@ -14,6 +14,8 @@
#
#------------------------------------------------------------------------------
+#include <Library/PcdLib.h>
+
/*
This is the stack constructed by the exception handler (low address to high address)
@@ -50,22 +52,17 @@ This is the stack constructed by the exception handler (low address to high addr
*/
-.globl ASM_PFX(ExceptionHandlersStart)
-INTERWORK_FUNC(ExceptionHandlersStart)
-.globl ASM_PFX(ExceptionHandlersEnd)
-INTERWORK_FUNC(ExceptionHandlersEnd)
-.globl ASM_PFX(CommonExceptionEntry)
-INTERWORK_FUNC(CommonExceptionEntry)
-.globl ASM_PFX(AsmCommonExceptionEntry)
-INTERWORK_FUNC(AsmCommonExceptionEntry)
-.globl ASM_PFX(CommonCExceptionHandler)
-INTERWORK_FUNC(CommonCExceptionHandler)
+GCC_ASM_EXPORT(ExceptionHandlersStart)
+GCC_ASM_EXPORT(ExceptionHandlersEnd)
+GCC_ASM_EXPORT(CommonExceptionEntry)
+GCC_ASM_EXPORT(AsmCommonExceptionEntry)
+GCC_ASM_EXPORT(CommonCExceptionHandler)
.text
#if !defined(__APPLE__)
.fpu neon @ makes vpush/vpop assemble
#endif
-.align 3
+.align 5
//
@@ -198,10 +195,7 @@ ASM_PFX(FiqEntry):
// This gets patched by the C code that patches in the vector table
//
ASM_PFX(CommonExceptionEntry):
- .byte 0x12
- .byte 0x34
- .byte 0x56
- .byte 0x78
+ .word ASM_PFX(AsmCommonExceptionEntry)
ASM_PFX(ExceptionHandlersEnd):
@@ -254,8 +248,9 @@ NoAdjustNeeded:
@ R0 is ExceptionType
mov R1,SP @ R1 is SystemContext
+#if (FixedPcdGet32(PcdVFPEnabled))
vpush {d0-d15} @ save vstm registers in case they are used in optimizations
-
+#endif
/*
VOID
@@ -268,7 +263,9 @@ CommonCExceptionHandler (
*/
blx ASM_PFX(CommonCExceptionHandler) @ Call exception handler
+#if (FixedPcdGet32(PcdVFPEnabled))
vpop {d0-d15}
+#endif
ldr R1, [SP, #0x4c] @ Restore EFI_SYSTEM_CONTEXT_ARM.IFSR
mcr p15, 0, R1, c5, c0, 1 @ Write IFSR
diff --git a/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.asm b/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.asm
index a8a477026e..240e1e38ae 100644
--- a/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.asm
+++ b/ArmPkg/Drivers/CpuDxe/ExceptionSupport.ARMv6.asm
@@ -14,7 +14,7 @@
//
//------------------------------------------------------------------------------
-
+#include <Library/PcdLib.h>
/*
@@ -61,6 +61,8 @@ This is the stack constructed by the exception handler (low address to high addr
PRESERVE8
AREA DxeExceptionHandlers, CODE, READONLY
+ ALIGN 32
+
//
// This code gets copied to the ARM vector table
// ExceptionHandlersStart - ExceptionHandlersEnd gets copied
@@ -190,7 +192,7 @@ FiqEntry
// This gets patched by the C code that patches in the vector table
//
CommonExceptionEntry
- dcd 0x12345678
+ dcd AsmCommonExceptionEntry
ExceptionHandlersEnd
@@ -243,7 +245,9 @@ NoAdjustNeeded
; R0 is ExceptionType
mov R1,SP ; R1 is SystemContext
+#if (FixedPcdGet32(PcdVFPEnabled))
vpush {d0-d15} ; save vstm registers in case they are used in optimizations
+#endif
/*
VOID
@@ -256,7 +260,9 @@ CommonCExceptionHandler (
*/
blx CommonCExceptionHandler ; Call exception handler
+#if (FixedPcdGet32(PcdVFPEnabled))
vpop {d0-d15}
+#endif
ldr R1, [SP, #0x4c] ; Restore EFI_SYSTEM_CONTEXT_ARM.IFSR
mcr p15, 0, R1, c5, c0, 1 ; Write IFSR
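
Both exception-support variants (the GAS .S file above and the RVCT .asm file here) receive the same changes: the vector area is now 32-byte aligned (".align 5" / "ALIGN 32"), matching the alignment VBAR requires of a vector base; the CommonExceptionEntry word is emitted as a real reference to AsmCommonExceptionEntry rather than a 0x12345678 placeholder, so the in-place table is usable even before Exception.c patches the relocated copy; and the d0-d15 save/restore around the C handler is assembled only when the VFP is enabled. The guard works because these .S/.asm files are fed through the C preprocessor (hence the new #include <Library/PcdLib.h>), letting FixedPcdGet32() collapse to a build-time constant. A minimal C-preprocessor illustration of the same pattern (the macro names are hypothetical):

#include <Library/PcdLib.h>

//
// Hypothetical illustration of the build-time guard used in the assembly
// above: the fixed PCD expands to a preprocessor constant, so the VFP
// save/restore is only emitted when PcdVFPEnabled is non-zero.
//
#if (FixedPcdGet32 (PcdVFPEnabled))
  #define SAVE_VFP_CONTEXT()     /* vpush {d0-d15} */
  #define RESTORE_VFP_CONTEXT()  /* vpop  {d0-d15} */
#else
  #define SAVE_VFP_CONTEXT()
  #define RESTORE_VFP_CONTEXT()
#endif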
diff --git a/ArmPkg/Drivers/CpuDxe/ExceptionSupport.S b/ArmPkg/Drivers/CpuDxe/ExceptionSupport.S
index 223a383d36..96bd68246f 100644
--- a/ArmPkg/Drivers/CpuDxe/ExceptionSupport.S
+++ b/ArmPkg/Drivers/CpuDxe/ExceptionSupport.S
@@ -15,11 +15,11 @@
.text
.align 3
-.globl ASM_PFX(ExceptionHandlersStart)
-.globl ASM_PFX(ExceptionHandlersEnd)
-.globl ASM_PFX(CommonExceptionEntry)
-.globl ASM_PFX(AsmCommonExceptionEntry)
-.globl ASM_PFX(CommonCExceptionHandler)
+GCC_ASM_EXPORT(ExceptionHandlersStart)
+GCC_ASM_EXPORT(ExceptionHandlersEnd)
+GCC_ASM_EXPORT(CommonExceptionEntry)
+GCC_ASM_EXPORT(AsmCommonExceptionEntry)
+GCC_ASM_EXPORT(CommonCExceptionHandler)
ASM_PFX(ExceptionHandlersStart):
diff --git a/ArmPkg/Drivers/CpuDxe/Mmu.c b/ArmPkg/Drivers/CpuDxe/Mmu.c
index 3662e739e3..d7ea0eb551 100644
--- a/ArmPkg/Drivers/CpuDxe/Mmu.c
+++ b/ArmPkg/Drivers/CpuDxe/Mmu.c
@@ -15,129 +15,15 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
--*/
#include "CpuDxe.h"
-
-
-//
-// Translation/page table definitions
-//
+//FIXME: Remove this ARMv7 specific header
+#include <Chipset/ArmV7.h>
// First Level Descriptors
typedef UINT32 ARM_FIRST_LEVEL_DESCRIPTOR;
-// memory space covered by a first level descriptor
-#define ARM_PAGE_DESC_ENTRY_MVA_SIZE 0x00100000 // 1MB
-
-// number of first level descriptors to cover entire 32-bit memory space
-#define FIRST_LEVEL_ENTRY_COUNT (0xFFFFFFFF / ARM_PAGE_DESC_ENTRY_MVA_SIZE + 1)
-
-
-// page table 1st level descriptor entries
-#define ARM_PAGE_DESC_BASE_MASK 0xFFFFFC00
-#define ARM_PAGE_DESC_BASE_SHFIT 10
-#define ARM_PAGE_DESC_DOMAIN_MASK 0x000001E0
-#define ARM_PAGE_DESC_DOMAIN_SHIFT 5
-#define ARM_PAGE_DESC_NS 0x00000008
-
-#define ARM_FIRST_LEVEL_DESC_ALIGN 0x00004000 // 16KB
-
-// section 1st level desriptor entries
-#define ARM_SECTION_BASE_MASK 0xFFF00000
-#define ARM_SECTION_BASE_SHIFT 20
-#define ARM_SECTION_NS 0x00080000
-#define ARM_SECTION_nG 0x00020000
-#define ARM_SECTION_S 0x00010000
-#define ARM_SECTION_AP2 0x00008000
-#define ARM_SECTION_TEX_MASK 0x00007000
-#define ARM_SECTION_TEX_SHIFT 12
-#define ARM_SECTION_AP10_MASK 0x00000C00
-#define ARM_SECTION_AP10_SHIFT 10
-#define ARM_SECTION_DOMAIN_MASK 0x000001E0
-#define ARM_SECTION_DOMAIN_SHIFT 5
-#define ARM_SECTION_XN 0x00000010
-#define ARM_SECTION_C 0x00000008
-#define ARM_SECTION_B 0x00000004
-
-// section level AP[2:0] definitions
-#define ARM_SECTION_AP_NO_ACCESS 0 // AP[2:0] = 0
-#define ARM_SECTION_AP_READ_WRITE ARM_SECTION_AP10_MASK // AP[2:0] = 011
-#define ARM_SECTION_AP_READ_ONLY (ARM_SECTION_AP2 | ARM_SECTION_AP10_MASK) // AP[2:0] = 111
-
-// common 1st level descriptor fields
-#define ARM_DESC_TYPE_MASK 0x00000003
-
-// descriptor type values
-#define ARM_DESC_TYPE_FAULT 0x0
-#define ARM_DESC_TYPE_PAGE_TABLE 0x1
-#define ARM_DESC_TYPE_SECTION 0x2
-
-
// Second Level Descriptors
typedef UINT32 ARM_PAGE_TABLE_ENTRY;
-// small page 2nd level descriptor entries
-#define ARM_SMALL_PAGE_BASE_MASK 0xFFFFF000
-#define ARM_SMALL_PAGE_INDEX_MASK 0x000FF000
-#define ARM_SMALL_PAGE_BASE_SHIFT 12
-#define ARM_SMALL_PAGE_TEX_MASK 0x000001C0
-#define ARM_SMALL_PAGE_TEX_SHIFT 6
-#define ARM_SMALL_PAGE_XN 0x00000001
-
-// large page 2nd level descriptor entries
-#define ARM_LARGE_PAGE_BASE_MASK 0xFFFF0000
-#define ARM_LARGE_PAGE_BASE_SHIFT 16
-#define ARM_LARGE_PAGE_TEX_MASK 0x00007000
-#define ARM_LARGE_PAGE_TEX_SHIFT 12
-#define ARM_LARGE_PAGE_XN 0x00008000
-
-// common 2nd level desriptor fields
-#define ARM_PAGE_nG 0x00000800
-#define ARM_PAGE_S 0x00000400
-#define ARM_PAGE_AP2 0x00000200
-#define ARM_PAGE_AP10_MASK 0x00000030
-#define ARM_PAGE_AP10_SHIFT 4
-#define ARM_PAGE_C 0x00000008
-#define ARM_PAGE_B 0x00000004
-#define ARM_PAGE_DESC_TYPE_MASK 0x00000003
-
-// descriptor type values
-#define ARM_PAGE_TYPE_FAULT 0x0
-#define ARM_PAGE_TYPE_LARGE 0x1
-#define ARM_PAGE_TYPE_SMALL 0x2
-#define ARM_PAGE_TYPE_SMALL_XN 0x3
-
-#define SMALL_PAGE_TABLE_ENTRY_COUNT (ARM_PAGE_DESC_ENTRY_MVA_SIZE / SIZE_4KB)
-
-
-// Translation Table Base 0 fields
-#define ARM_TTBR0_BASE_MASK 0xFFFFC000
-#define ARM_TTBR0_BASE_SHIFT 14
-#define ARM_TTRB0_NOS 0x00000020
-
-// define the combination of interesting attributes: cacheability and access permissions
-#define ARM_SECTION_CACHEABILITY_MASK ( ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B )
-#define ARM_SECTION_RW_PERMISSIONS_MASK ( ARM_SECTION_AP2 | ARM_SECTION_AP10_MASK )
-#define ARM_DESCRIPTOR_ATTRIBUTES ( ARM_SECTION_CACHEABILITY_MASK | ARM_SECTION_RW_PERMISSIONS_MASK | ARM_SECTION_XN )
-
-// cacheability values for section entries
-#define ARM_SECTION_STRONGLY_ORDERED 0
-#define ARM_SECTION_SHAREABLE_DEVICE ARM_SECTION_B
-#define ARM_SECTION_WRITE_THROUGH ARM_SECTION_C
-#define ARM_SECTION_WRITE_BACK_NWA ( ARM_SECTION_C| ARM_SECTION_B )
-#define ARM_SECTION_NORMAL_UNCACHEABLE ( 0x1 << ARM_SECTION_TEX_SHIFT )
-#define ARM_SECTION_WRITE_BACK ( ( 0x1 << ARM_SECTION_TEX_SHIFT ) | ARM_SECTION_C | ARM_SECTION_B )
-#define ARM_SECTION_NONSHAREABLE_DEVICE ( 0x2 << ARM_SECTION_TEX_SHIFT )
-
-// permissions values for section entries
-#define ARM_SECTION_NO_ACCESS 0
-#define ARM_SECTION_PRIV_ACCESS_ONLY ( 0x1 << ARM_SECTION_AP10_SHIFT)
-#define ARM_SECTION_USER_READ_ONLY ( 0x2 << ARM_SECTION_AP10_SHIFT)
-#define ARM_SECTION_FULL_ACCESS ( 0x3 << ARM_SECTION_AP10_SHIFT)
-#define ARM_SECTION_PRIV_READ_ONLY ( ARM_SECTION_AP2 | (0x1 << ARM_SECTION_AP10_SHIFT) )
-#define ARM_SECTION_READ_ONLY_DEP ( ARM_SECTION_AP2 | (0x2 << ARM_SECTION_AP10_SHIFT) )
-#define ARM_SECTION_READ_ONLY ( ARM_SECTION_AP2 | (0x3 << ARM_SECTION_AP10_SHIFT) )
-
-
-
EFI_STATUS
SectionToGcdAttributes (
IN UINT32 SectionAttributes,
@@ -147,47 +33,46 @@ SectionToGcdAttributes (
*GcdAttributes = 0;
// determine cacheability attributes
- switch(SectionAttributes & ARM_SECTION_CACHEABILITY_MASK) {
- case ARM_SECTION_STRONGLY_ORDERED:
+ switch(SectionAttributes & TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK) {
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_STRONGLY_ORDERED:
*GcdAttributes |= EFI_MEMORY_UC;
break;
- case ARM_SECTION_SHAREABLE_DEVICE:
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_SHAREABLE_DEVICE:
*GcdAttributes |= EFI_MEMORY_UC;
break;
- case ARM_SECTION_WRITE_THROUGH:
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC:
*GcdAttributes |= EFI_MEMORY_WT;
break;
- case ARM_SECTION_WRITE_BACK_NWA:
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_BACK_NO_ALLOC:
*GcdAttributes |= EFI_MEMORY_WB;
break;
- case ARM_SECTION_NORMAL_UNCACHEABLE:
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE:
*GcdAttributes |= EFI_MEMORY_WC;
break;
- case ARM_SECTION_WRITE_BACK:
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_BACK_ALLOC:
*GcdAttributes |= EFI_MEMORY_WB;
break;
- case ARM_SECTION_NONSHAREABLE_DEVICE:
+ case TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_SHAREABLE_DEVICE:
*GcdAttributes |= EFI_MEMORY_UC;
break;
default:
return EFI_UNSUPPORTED;
}
-
+
// determine protection attributes
- switch(SectionAttributes & ARM_SECTION_RW_PERMISSIONS_MASK) {
- case ARM_SECTION_NO_ACCESS: // no read, no write
+ switch(SectionAttributes & TT_DESCRIPTOR_SECTION_AP_MASK) {
+ case TT_DESCRIPTOR_SECTION_AP_NO_NO: // no read, no write
//*GcdAttributes |= EFI_MEMORY_WP | EFI_MEMORY_RP;
break;
- case ARM_SECTION_PRIV_ACCESS_ONLY:
- case ARM_SECTION_FULL_ACCESS:
+ case TT_DESCRIPTOR_SECTION_AP_RW_NO:
+ case TT_DESCRIPTOR_SECTION_AP_RW_RW:
// normal read/write access, do not add additional attributes
break;
// read only cases map to write-protect
- case ARM_SECTION_PRIV_READ_ONLY:
- case ARM_SECTION_READ_ONLY_DEP:
- case ARM_SECTION_READ_ONLY:
+ case TT_DESCRIPTOR_SECTION_AP_RO_NO:
+ case TT_DESCRIPTOR_SECTION_AP_RO_RO:
*GcdAttributes |= EFI_MEMORY_WP;
break;
@@ -196,7 +81,7 @@ SectionToGcdAttributes (
}
// now process eXectue Never attribute
- if ((SectionAttributes & ARM_SECTION_XN) != 0 ) {
+ if ((SectionAttributes & TT_DESCRIPTOR_SECTION_XN_MASK) != 0 ) {
*GcdAttributes |= EFI_MEMORY_XP;
}
@@ -369,22 +254,22 @@ SyncCacheConfig (
// with a way for GCD to query the CPU Arch. driver of the existing memory space attributes instead.
// obtain page table base
- FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)(ArmGetTranslationTableBaseAddress ());
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)(ArmGetTTBR0BaseAddress ());
// iterate through each 1MB descriptor
NextRegionBase = NextRegionLength = 0;
- for (i=0; i< FIRST_LEVEL_ENTRY_COUNT; i++) {
+ for (i=0; i< TRANSLATION_TABLE_SECTION_COUNT; i++) {
// obtain existing descriptor and make sure it contains a valid Base Address even if it is a fault section
- Descriptor = FirstLevelTable[i] | (ARM_SECTION_BASE_MASK & (i << ARM_SECTION_BASE_SHIFT));
+ Descriptor = FirstLevelTable[i] | TT_DESCRIPTOR_SECTION_BASE_ADDRESS(i << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
// extract attributes (cacheability and permissions)
- SectionAttributes = Descriptor & 0xDEC;
+ SectionAttributes = Descriptor & (TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK | TT_DESCRIPTOR_SECTION_AP_MASK);
// do we already have an existing region (or are we about to finish)?
// Skip the first entry, and make sure we close on the last entry
- if ( (NextRegionLength > 0) || (i == (FIRST_LEVEL_ENTRY_COUNT-1)) ) {
+ if ( (NextRegionLength > 0) || (i == (TRANSLATION_TABLE_SECTION_COUNT-1)) ) {
// attributes are changing, update attributes in GCD
if (SectionAttributes != NextRegionAttributes) {
@@ -398,7 +283,7 @@ SyncCacheConfig (
// start on a new region
NextRegionLength = 0;
- NextRegionBase = Descriptor & ARM_SECTION_BASE_MASK;
+ NextRegionBase = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(Descriptor);
}
}
@@ -407,7 +292,7 @@ SyncCacheConfig (
NextRegionAttributes = SectionAttributes;
}
- NextRegionLength += ARM_PAGE_DESC_ENTRY_MVA_SIZE;
+ NextRegionLength += TT_DESCRIPTOR_SECTION_SIZE;
} // section entry loop
@@ -444,37 +329,42 @@ UpdatePageEntries (
// EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone)
// EntryValue: values at bit positions specified by EntryMask
- EntryMask = ARM_PAGE_DESC_TYPE_MASK;
- EntryValue = ARM_PAGE_TYPE_SMALL;
+ EntryMask = TT_DESCRIPTOR_PAGE_TYPE_MASK;
+ EntryValue = TT_DESCRIPTOR_PAGE_TYPE_PAGE;
// Although the PI spec is unclear on this the GCD guarantees that only
// one Attribute bit is set at a time, so we can safely use a switch statement
switch (Attributes) {
case EFI_MEMORY_UC:
// modify cacheability attributes
- EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;
- // map to strongly ordered
- EntryValue |= 0; // TEX[2:0] = 0, C=0, B=0
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+ if (FeaturePcdGet(PcdEfiUncachedMemoryToStronglyOrdered)) {
+ // map to strongly ordered
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_STRONGLY_ORDERED; // TEX[2:0] = 0, C=0, B=0
+ } else {
+ // map to normal non-cachable
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_NON_CACHEABLE; // TEX [2:0]= 001 = 0x2, B=0, C=0
+ }
break;
case EFI_MEMORY_WC:
// modify cacheability attributes
- EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
// map to normal non-cachable
- EntryValue |= (0x1 << ARM_SMALL_PAGE_TEX_SHIFT); // TEX [2:0]= 001 = 0x2, B=0, C=0
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_NON_CACHEABLE; // TEX [2:0]= 001 = 0x2, B=0, C=0
break;
case EFI_MEMORY_WT:
// modify cacheability attributes
- EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
// write through with no-allocate
- EntryValue |= ARM_PAGE_C; // TEX [2:0] = 0, C=1, B=0
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC; // TEX [2:0] = 0, C=1, B=0
break;
case EFI_MEMORY_WB:
// modify cacheability attributes
- EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
// write back (with allocate)
- EntryValue |= (0x1 << ARM_SMALL_PAGE_TEX_SHIFT) | ARM_PAGE_C | ARM_PAGE_B; // TEX [2:0] = 001, C=1, B=1
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_WRITE_BACK_ALLOC; // TEX [2:0] = 001, C=1, B=1
break;
case EFI_MEMORY_WP:
@@ -482,7 +372,7 @@ UpdatePageEntries (
case EFI_MEMORY_UCE:
// cannot be implemented UEFI definition unclear for ARM
// Cause a page fault if these ranges are accessed.
- EntryValue = ARM_PAGE_TYPE_FAULT;
+ EntryValue = TT_DESCRIPTOR_PAGE_TYPE_FAULT;
DEBUG ((EFI_D_PAGE, "SetMemoryAttributes(): setting page %lx with unsupported attribute %x will page fault on access\n", BaseAddress, Attributes));
break;
@@ -491,7 +381,7 @@ UpdatePageEntries (
}
// obtain page table base
- FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTranslationTableBaseAddress ();
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
// calculate number of 4KB page table entries to change
NumPageEntries = Length/SIZE_4KB;
@@ -501,15 +391,15 @@ UpdatePageEntries (
for(p=0; p<NumPageEntries; p++) {
// calculate index into first level translation table for page table value
- FirstLevelIdx = ((BaseAddress + Offset) & ARM_SECTION_BASE_MASK) >> ARM_SECTION_BASE_SHIFT;
- ASSERT (FirstLevelIdx < FIRST_LEVEL_ENTRY_COUNT);
+ FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress + Offset) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
// read the descriptor from the first level page table
Descriptor = FirstLevelTable[FirstLevelIdx];
// does this descriptor need to be converted from section entry to 4K pages?
- if ((Descriptor & ARM_DESC_TYPE_MASK) != ARM_DESC_TYPE_PAGE_TABLE ) {
- Status = ConvertSectionToPages (FirstLevelIdx << ARM_SECTION_BASE_SHIFT);
+ if (!TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(Descriptor)) {
+ Status = ConvertSectionToPages (FirstLevelIdx << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
if (EFI_ERROR(Status)) {
// exit for loop
break;
@@ -520,11 +410,11 @@ UpdatePageEntries (
}
// obtain page table base address
- PageTable = (ARM_PAGE_TABLE_ENTRY *)(Descriptor & ARM_SMALL_PAGE_BASE_MASK);
+ PageTable = (ARM_PAGE_TABLE_ENTRY *)TT_DESCRIPTOR_PAGE_BASE_ADDRESS(Descriptor);
// calculate index into the page table
- PageTableIndex = ((BaseAddress + Offset) & ARM_SMALL_PAGE_INDEX_MASK) >> ARM_SMALL_PAGE_BASE_SHIFT;
- ASSERT (PageTableIndex < SMALL_PAGE_TABLE_ENTRY_COUNT);
+ PageTableIndex = ((BaseAddress + Offset) & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT;
+ ASSERT (PageTableIndex < TRANSLATION_TABLE_PAGE_COUNT);
// get the entry
CurrentPageTableEntry = PageTable[PageTableIndex];
@@ -541,8 +431,8 @@ UpdatePageEntries (
}
if (CurrentPageTableEntry != PageTableEntry) {
- Mva = (VOID *)(UINTN)((((UINTN)FirstLevelIdx) << ARM_SECTION_BASE_SHIFT) + (PageTableIndex << ARM_SMALL_PAGE_BASE_SHIFT));
- if ((CurrentPageTableEntry & ARM_PAGE_C) == ARM_PAGE_C) {
+ Mva = (VOID *)(UINTN)((((UINTN)FirstLevelIdx) << TT_DESCRIPTOR_SECTION_BASE_SHIFT) + (PageTableIndex << TT_DESCRIPTOR_PAGE_BASE_SHIFT));
+ if ((CurrentPageTableEntry & TT_DESCRIPTOR_PAGE_CACHEABLE_MASK) == TT_DESCRIPTOR_PAGE_CACHEABLE_MASK) {
// The current section mapping is cacheable so Clean/Invalidate the MVA of the page
// Note assumes switch(Attributes), not ARMv7 possibilities
WriteBackInvalidateDataCacheRange (Mva, SIZE_4KB);
@@ -586,38 +476,43 @@ UpdateSectionEntries (
// EntryValue: values at bit positions specified by EntryMask
// Make sure we handle a section range that is unmapped
- EntryMask = ARM_DESC_TYPE_MASK;
- EntryValue = ARM_DESC_TYPE_SECTION;
+ EntryMask = TT_DESCRIPTOR_SECTION_TYPE_MASK;
+ EntryValue = TT_DESCRIPTOR_SECTION_TYPE_SECTION;
// Although the PI spec is unclear on this the GCD guarantees that only
// one Attribute bit is set at a time, so we can safely use a switch statement
switch(Attributes) {
case EFI_MEMORY_UC:
// modify cacheability attributes
- EntryMask |= ARM_SECTION_CACHEABILITY_MASK;
- // map to strongly ordered
- EntryValue |= 0; // TEX[2:0] = 0, C=0, B=0
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+ if (FeaturePcdGet(PcdEfiUncachedMemoryToStronglyOrdered)) {
+ // map to strongly ordered
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_STRONGLY_ORDERED; // TEX[2:0] = 0, C=0, B=0
+ } else {
+ // map to normal non-cachable
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE; // TEX [2:0]= 001 = 0x2, B=0, C=0
+ }
break;
case EFI_MEMORY_WC:
// modify cacheability attributes
- EntryMask |= ARM_SECTION_CACHEABILITY_MASK;
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
// map to normal non-cachable
- EntryValue |= (0x1 << ARM_SECTION_TEX_SHIFT); // TEX [2:0]= 001 = 0x2, B=0, C=0
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE; // TEX [2:0]= 001 = 0x2, B=0, C=0
break;
case EFI_MEMORY_WT:
// modify cacheability attributes
- EntryMask |= ARM_SECTION_CACHEABILITY_MASK;
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
// write through with no-allocate
- EntryValue |= ARM_SECTION_C; // TEX [2:0] = 0, C=1, B=0
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC; // TEX [2:0] = 0, C=1, B=0
break;
case EFI_MEMORY_WB:
// modify cacheability attributes
- EntryMask |= ARM_SECTION_CACHEABILITY_MASK;
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
// write back (with allocate)
- EntryValue |= (0x1 << ARM_SECTION_TEX_SHIFT) | ARM_SECTION_C | ARM_SECTION_B; // TEX [2:0] = 001, C=1, B=1
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_BACK_ALLOC; // TEX [2:0] = 001, C=1, B=1
break;
case EFI_MEMORY_WP:
@@ -626,7 +521,7 @@ UpdateSectionEntries (
case EFI_MEMORY_UCE:
// cannot be implemented UEFI definition unclear for ARM
// Cause a page fault if these ranges are accessed.
- EntryValue = ARM_DESC_TYPE_FAULT;
+ EntryValue = TT_DESCRIPTOR_SECTION_TYPE_FAULT;
DEBUG ((EFI_D_PAGE, "SetMemoryAttributes(): setting section %lx with unsupported attribute %x will page fault on access\n", BaseAddress, Attributes));
break;
@@ -636,23 +531,23 @@ UpdateSectionEntries (
}
// obtain page table base
- FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTranslationTableBaseAddress ();
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
// calculate index into first level translation table for start of modification
- FirstLevelIdx = (BaseAddress & ARM_SECTION_BASE_MASK) >> ARM_SECTION_BASE_SHIFT;
- ASSERT (FirstLevelIdx < FIRST_LEVEL_ENTRY_COUNT);
+ FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
// calculate number of 1MB first level entries this applies to
- NumSections = Length / ARM_PAGE_DESC_ENTRY_MVA_SIZE;
+ NumSections = Length / TT_DESCRIPTOR_SECTION_SIZE;
// iterate through each descriptor
for(i=0; i<NumSections; i++) {
CurrentDescriptor = FirstLevelTable[FirstLevelIdx + i];
// has this descriptor already been coverted to pages?
- if ((CurrentDescriptor & ARM_DESC_TYPE_MASK) != ARM_DESC_TYPE_PAGE_TABLE ) {
+ if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(CurrentDescriptor)) {
// forward this 1MB range to page table function instead
- Status = UpdatePageEntries ((FirstLevelIdx + i) << ARM_SECTION_BASE_SHIFT, ARM_PAGE_DESC_ENTRY_MVA_SIZE, Attributes, VirtualMask);
+ Status = UpdatePageEntries ((FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT, TT_DESCRIPTOR_SECTION_SIZE, Attributes, VirtualMask);
} else {
// still a section entry
@@ -666,8 +561,8 @@ UpdateSectionEntries (
}
if (CurrentDescriptor != Descriptor) {
- Mva = (VOID *)(UINTN)(((UINTN)FirstLevelTable) << ARM_SECTION_BASE_SHIFT);
- if ((CurrentDescriptor & ARM_SECTION_C) == ARM_SECTION_C) {
+ Mva = (VOID *)(UINTN)(((UINTN)FirstLevelTable) << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
+ if ((CurrentDescriptor & TT_DESCRIPTOR_SECTION_CACHEABLE_MASK) == TT_DESCRIPTOR_SECTION_CACHEABLE_MASK) {
// The current section mapping is cacheable so Clean/Invalidate the MVA of the section
// Note assumes switch(Attributes), not ARMv7 possabilities
WriteBackInvalidateDataCacheRange (Mva, SIZE_1MB);
@@ -704,35 +599,20 @@ ConvertSectionToPages (
DEBUG ((EFI_D_PAGE, "Converting section at 0x%x to pages\n", (UINTN)BaseAddress));
// obtain page table base
- FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTranslationTableBaseAddress ();
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
// calculate index into first level translation table for start of modification
- FirstLevelIdx = (BaseAddress & ARM_SECTION_BASE_MASK) >> ARM_SECTION_BASE_SHIFT;
- ASSERT (FirstLevelIdx < FIRST_LEVEL_ENTRY_COUNT);
+ FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
// get section attributes and convert to page attributes
SectionDescriptor = FirstLevelTable[FirstLevelIdx];
- PageDescriptor = ARM_PAGE_TYPE_SMALL;
- PageDescriptor |= ((SectionDescriptor & ARM_SECTION_TEX_MASK) >> ARM_SECTION_TEX_SHIFT) << ARM_SMALL_PAGE_TEX_SHIFT;
- if ((SectionDescriptor & ARM_SECTION_B) != 0) {
- PageDescriptor |= ARM_PAGE_B;
- }
- if ((SectionDescriptor & ARM_SECTION_C) != 0) {
- PageDescriptor |= ARM_PAGE_C;
- }
- PageDescriptor |= ((SectionDescriptor & ARM_SECTION_AP10_MASK) >> ARM_SECTION_AP10_SHIFT) << ARM_PAGE_AP10_SHIFT;
- if ((SectionDescriptor & ARM_SECTION_AP2) != 0) {
- PageDescriptor |= ARM_PAGE_AP2;
- }
- if ((SectionDescriptor & ARM_SECTION_XN) != 0) {
- PageDescriptor |= ARM_PAGE_TYPE_SMALL_XN;
- }
- if ((SectionDescriptor & ARM_SECTION_nG) != 0) {
- PageDescriptor |= ARM_PAGE_nG;
- }
- if ((SectionDescriptor & ARM_SECTION_S) != 0) {
- PageDescriptor |= ARM_PAGE_S;
- }
+ PageDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE;
+ PageDescriptor |= TT_DESCRIPTOR_CONVERT_TO_PAGE_CACHE_POLICY(SectionDescriptor,0);
+ PageDescriptor |= TT_DESCRIPTOR_CONVERT_TO_PAGE_AP(SectionDescriptor);
+ PageDescriptor |= TT_DESCRIPTOR_CONVERT_TO_PAGE_XN(SectionDescriptor,0);
+ PageDescriptor |= TT_DESCRIPTOR_CONVERT_TO_PAGE_NG(SectionDescriptor);
+ PageDescriptor |= TT_DESCRIPTOR_CONVERT_TO_PAGE_S(SectionDescriptor);
// allocate a page table for the 4KB entries (we use up a full page even though we only need 1KB)
Status = gBS->AllocatePages (AllocateAnyPages, EfiBootServicesData, 1, &PageTableAddr);
@@ -743,15 +623,15 @@ ConvertSectionToPages (
PageTable = (volatile ARM_PAGE_TABLE_ENTRY *)(UINTN)PageTableAddr;
// write the page table entries out
- for (i=0; i<(ARM_PAGE_DESC_ENTRY_MVA_SIZE/SIZE_4KB); i++) {
- PageTable[i] = ((BaseAddress + (i << 12)) & ARM_SMALL_PAGE_BASE_MASK) | PageDescriptor;
+ for (i=0; i < TRANSLATION_TABLE_PAGE_COUNT; i++) {
+ PageTable[i] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseAddress + (i << 12)) | PageDescriptor;
}
// flush d-cache so descriptors make it back to uncached memory for subsequent table walks
WriteBackInvalidateDataCacheRange ((VOID *)(UINTN)PageTableAddr, SIZE_4KB);
// formulate page table entry, Domain=0, NS=0
- PageTableDescriptor = (((UINTN)PageTableAddr) & ARM_PAGE_DESC_BASE_MASK) | ARM_DESC_TYPE_PAGE_TABLE;
+ PageTableDescriptor = (((UINTN)PageTableAddr) & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) | TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
// write the page table entry out, repalcing section entry
FirstLevelTable[FirstLevelIdx] = PageTableDescriptor;
@@ -910,7 +790,3 @@ VIRTUAL_UNCACHED_PAGES_PROTOCOL gVirtualUncachedPages = {
CpuConvertPagesToUncachedVirtualAddress,
CpuReconvertPages
};
-
-
-
-
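
The Mmu.c rewrite replaces the driver's private ARM_SECTION_*/ARM_PAGE_* definitions with the shared TT_DESCRIPTOR_* macros from Chipset/ArmV7.h (hence the FIXME about non-ARMv7 builds), obtains the table base with ArmGetTTBR0BaseAddress(), and makes the EFI_MEMORY_UC mapping configurable: Strongly Ordered when PcdEfiUncachedMemoryToStronglyOrdered is set, Normal Non-Cacheable otherwise. A condensed sketch of the resulting cacheability choice for section entries, assuming the same include environment as the patched Mmu.c (the helper is hypothetical; the macro names are the ones used in the hunks above):

#include "CpuDxe.h"
#include <Chipset/ArmV7.h>

//
// Hypothetical helper, not in the patch: returns the section-descriptor
// cache-policy bits for a requested GCD attribute, mirroring the
// UpdateSectionEntries() switch above.
//
STATIC
UINT32
SketchSectionCachePolicy (
  IN UINT64  Attributes
  )
{
  switch (Attributes) {
  case EFI_MEMORY_UC:
    // New behavior: uncached memory is Strongly Ordered only if the platform
    // asks for it; otherwise it is Normal Non-Cacheable (TEX[2:0]=001, C=0, B=0).
    if (FeaturePcdGet (PcdEfiUncachedMemoryToStronglyOrdered)) {
      return TT_DESCRIPTOR_SECTION_CACHE_POLICY_STRONGLY_ORDERED;
    }
    return TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE;
  case EFI_MEMORY_WC:
    return TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE;
  case EFI_MEMORY_WT:
    return TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC;
  case EFI_MEMORY_WB:
    return TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_BACK_ALLOC;
  default:
    // EFI_MEMORY_WP/XP/RP/UCE are handled by faulting the range in the real code.
    return 0;
  }
}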