| author | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2016-06-15 18:49:09 +0200 |
|---|---|---|
| committer | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2016-07-07 14:33:47 +0200 |
| commit | d7f03464b246b4111c8e21ba3dcd7b4a7b85fa7f (patch) | |
| tree | 1c9e4a9e5e0b59d38216d3de6e892a7c550a5062 /ArmPkg/Library | |
| parent | 12728e1137d37debdf6b98a8b8689106b4584908 (diff) | |
ArmPkg: introduce base ArmMmuLib implementation
This base library encapsulates the MMU manipulation routines that have been
factored out of ArmLib. The functionality covers initial creation of the 1:1
mapping in the page tables, and remapping regions to change permissions or
cacheability attributes.
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Leif Lindholm <leif.lindholm@linaro.org>
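
As a rough sketch of how a platform might consume this library (illustration only, not part of this patch; the memory regions, attribute choices, and the EnablePlatformMmu name are hypothetical), the expected flow is: describe the 1:1 mapping with a zero-terminated ARM_MEMORY_REGION_DESCRIPTOR table, pass it to ArmConfigureMmu (), then tighten permissions afterwards with the remapping helpers.

#include <Base.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>

// Hypothetical 1:1 memory map; a zero-length entry terminates the table.
STATIC ARM_MEMORY_REGION_DESCRIPTOR mVirtualMemoryTable[] = {
  // PhysicalBase  VirtualBase  Length       Attributes
  {  0x00000000,   0x00000000,  0x08000000,  ARM_MEMORY_REGION_ATTRIBUTE_DEVICE     },
  {  0x40000000,   0x40000000,  0x40000000,  ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK },
  {  0,            0,           0,           0                                      }
};

RETURN_STATUS
EnablePlatformMmu (
  VOID
  )
{
  VOID           *TranslationTableBase;
  UINTN          TranslationTableSize;
  RETURN_STATUS  Status;

  // Build the initial 1:1 mapping and enable the MMU.
  Status = ArmConfigureMmu (mVirtualMemoryTable,
             &TranslationTableBase, &TranslationTableSize);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Example remap: mark the device region non-executable.
  // (Returns RETURN_UNSUPPORTED in the 32-bit ARM implementation below.)
  return ArmSetMemoryRegionNoExec (0x00000000, 0x08000000);
}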
Diffstat (limited to 'ArmPkg/Library')
-rw-r--r-- | ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 768
-rw-r--r-- | ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S | 76
-rw-r--r-- | ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c | 452
-rw-r--r-- | ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S | 35
-rw-r--r-- | ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm | 32
-rw-r--r-- | ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf | 43
6 files changed, 1406 insertions, 0 deletions
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
new file mode 100644
index 0000000000..6e05e60850
--- /dev/null
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -0,0 +1,768 @@
+/** @file
+* File managing the MMU for ARMv8 architecture
+*
+* Copyright (c) 2011-2014, ARM Limited. All rights reserved.
+* Copyright (c) 2016, Linaro Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/ArmMmuLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+
+// We use this index definition to define an invalid block entry
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)
+
+STATIC
+UINT64
+ArmMemoryAttributeToPageAttribute (
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
+ )
+{
+ switch (Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
+
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
+
+ // Uncached and device mappings are treated as outer shareable by default.
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+
+ default:
+ ASSERT(0);
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ if (ArmReadCurrentEL () == AARCH64_EL2)
+ return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
+ else
+ return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
+ }
+}
+
+UINT64
+PageAttributeToGcdAttribute (
+ IN UINT64 PageAttributes
+ )
+{
+ UINT64 GcdAttributes;
+
+ switch (PageAttributes & TT_ATTR_INDX_MASK) {
+ case TT_ATTR_INDX_DEVICE_MEMORY:
+ GcdAttributes = EFI_MEMORY_UC;
+ break;
+ case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
+ GcdAttributes = EFI_MEMORY_WC;
+ break;
+ case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
+ GcdAttributes = EFI_MEMORY_WT;
+ break;
+ case TT_ATTR_INDX_MEMORY_WRITE_BACK:
+ GcdAttributes = EFI_MEMORY_WB;
+ break;
+ default:
+ DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
+ ASSERT (0);
+ // The Global Coherency Domain (GCD) value is defined as a bit set.
+ // Returning 0 means no attribute has been set.
+ GcdAttributes = 0;
+ }
+
+ // Determine protection attributes
+ if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
+ // Read only cases map to write-protect
+ GcdAttributes |= EFI_MEMORY_WP;
+ }
+
+ // Process eXecute Never attribute
+ if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
+ GcdAttributes |= EFI_MEMORY_XP;
+ }
+
+ return GcdAttributes;
+}
+
+ARM_MEMORY_REGION_ATTRIBUTES
+GcdAttributeToArmAttribute (
+ IN UINT64 GcdAttributes
+ )
+{
+ switch (GcdAttributes & 0xFF) {
+ case EFI_MEMORY_UC:
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
+ case EFI_MEMORY_WC:
+ return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
+ case EFI_MEMORY_WT:
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
+ case EFI_MEMORY_WB:
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
+ default:
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes are not supported.\n", GcdAttributes));
+ ASSERT (0);
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
+ }
+}
+
+// Describe the T0SZ values for each translation table level
+typedef struct {
+ UINTN MinT0SZ;
+ UINTN MaxT0SZ;
+ UINTN LargestT0SZ; // Generally (MaxT0SZ == LargestT0SZ) but at the Level3 Table
+ // the MaxT0SZ is not at the boundary of the table
+} T0SZ_DESCRIPTION_PER_LEVEL;
+
+// Map table for the corresponding Level of Table
+STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
+ { 16, 24, 24 }, // Table Level 0
+ { 25, 33, 33 }, // Table Level 1
+ { 34, 39, 42 } // Table Level 2
+};
+
+VOID
+GetRootTranslationTableInfo (
+ IN UINTN T0SZ,
+ OUT UINTN *TableLevel,
+ OUT UINTN *TableEntryCount
+ )
+{
+ UINTN Index;
+
+ // Identify the level of the root table from the given T0SZ
+ for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
+ if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
+ break;
+ }
+ }
+
+ // If we have not found the corresponding maximum T0SZ then we use the last one
+ if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
+ Index--;
+ }
+
+ // Get the level of the root table
+ if (TableLevel) {
+ *TableLevel = Index;
+ }
+
+ // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
+ if (TableEntryCount) {
+ *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
+ }
+}
+
+STATIC
+VOID
+ReplaceLiveEntry (
+ IN UINT64 *Entry,
+ IN UINT64 Value
+ )
+{
+ if (!ArmMmuEnabled ()) {
+ *Entry = Value;
+ } else {
+ ArmReplaceLiveTranslationEntry (Entry, Value);
+ }
+}
+
+STATIC
+VOID
+LookupAddresstoRootTable (
+ IN UINT64 MaxAddress,
+ OUT UINTN *T0SZ,
+ OUT UINTN *TableEntryCount
+ )
+{
+ UINTN TopBit;
+
+ // Check the parameters are not NULL
+ ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
+
+ // Look for the highest bit set in MaxAddress
+ for (TopBit = 63; TopBit != 0; TopBit--) {
+ if ((1ULL << TopBit) & MaxAddress) {
+ // MaxAddress top bit is found
+ TopBit = TopBit + 1;
+ break;
+ }
+ }
+ ASSERT (TopBit != 0);
+
+ // Calculate T0SZ from the top bit of the MaxAddress
+ *T0SZ = 64 - TopBit;
+
+ // Get the Table info from T0SZ
+ GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
+}
+
+STATIC
+UINT64*
+GetBlockEntryListFromAddress (
+ IN UINT64 *RootTable,
+ IN UINT64 RegionStart,
+ OUT UINTN *TableLevel,
+ IN OUT UINT64 *BlockEntrySize,
+ OUT UINT64 **LastBlockEntry
+ )
+{
+ UINTN RootTableLevel;
+ UINTN RootTableEntryCount;
+ UINT64 *TranslationTable;
+ UINT64 *BlockEntry;
+ UINT64 *SubTableBlockEntry;
+ UINT64 BlockEntryAddress;
+ UINTN BaseAddressAlignment;
+ UINTN PageLevel;
+ UINTN Index;
+ UINTN IndexLevel;
+ UINTN T0SZ;
+ UINT64 Attributes;
+ UINT64 TableAttributes;
+
+ // Initialize variable
+ BlockEntry = NULL;
+
+ // Ensure the parameters are valid
+ if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
+ ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
+ return NULL;
+ }
+
+ // Ensure the Region is aligned on 4KB boundary
+ if ((RegionStart & (SIZE_4KB - 1)) != 0) {
+ ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
+ return NULL;
+ }
+
+ // Ensure the required size is aligned on 4KB boundary and not 0
+ if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
+ ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
+ return NULL;
+ }
+
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
+ // Get the Table info from T0SZ
+ GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
+
+ // If the start address is 0x0 then we use the size of the region to identify the alignment
+ if (RegionStart == 0) {
+ // Identify the highest possible alignment for the Region Size
+ BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
+ } else {
+ // Identify the highest possible alignment for the Base Address
+ BaseAddressAlignment = LowBitSet64 (RegionStart);
+ }
+
+ // Identify the Page Level the RegionStart must belong to. Note that PageLevel
+ // should be at least 1 since block translations are not supported at level 0
+ PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
+
+ // If the required size is smaller than the current block size then we need to go to the page below.
+ // The PageLevel was calculated on the Base Address alignment but did not take into account the alignment
+ // of the allocation size
+ while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
+ // It does not fit, so move to the next (deeper) page level
+ PageLevel++;
+ }
+
+ //
+ // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
+ //
+
+ TranslationTable = RootTable;
+ for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
+
+ if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
+ // Go to the next table
+ TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ // If this table entry is at the target level, move the target one level deeper
+ if (IndexLevel == PageLevel) {
+ // Enter the next level
+ PageLevel++;
+ }
+ } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
+ // If we are not at the last level then we need to split this BlockEntry
+ if (IndexLevel != PageLevel) {
+ // Retrieve the attributes from the block entry
+ Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
+
+ // Convert the block entry attributes into Table descriptor attributes
+ TableAttributes = TT_TABLE_AP_NO_PERMISSION;
+ if (Attributes & TT_NS) {
+ TableAttributes = TT_TABLE_NS;
+ }
+
+ // Get the address corresponding to this entry
+ BlockEntryAddress = RegionStart;
+ BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+ // Shift back left to clear the bits below this level's address offset
+ BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+
+ // Set the correct entry type for the next page level
+ if ((IndexLevel + 1) == 3) {
+ Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ } else {
+ Attributes |= TT_TYPE_BLOCK_ENTRY;
+ }
+
+ // Create a new translation table
+ TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
+ if (TranslationTable == NULL) {
+ return NULL;
+ }
+
+ // Populate the newly created lower level table
+ SubTableBlockEntry = TranslationTable;
+ for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
+ *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
+ SubTableBlockEntry++;
+ }
+
+ // Fill the BlockEntry with the new TranslationTable
+ ReplaceLiveEntry (BlockEntry,
+ ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
+ }
+ } else {
+ if (IndexLevel != PageLevel) {
+ //
+ // Case where we have an invalid entry and we are at a page level above the one targeted.
+ //
+
+ // Create a new translation table
+ TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
+ if (TranslationTable == NULL) {
+ return NULL;
+ }
+
+ ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
+
+ // Fill the new BlockEntry with the TranslationTable
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
+ }
+ }
+ }
+
+ // Expose the found PageLevel to the caller
+ *TableLevel = PageLevel;
+
+ // Now, we have the Table Level we can get the Block Size associated to this table
+ *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
+
+ // The last block of the root table depends on the number of entries in this table,
+ // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
+ *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
+ (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);
+
+ return BlockEntry;
+}
+
+STATIC
+RETURN_STATUS
+UpdateRegionMapping (
+ IN UINT64 *RootTable,
+ IN UINT64 RegionStart,
+ IN UINT64 RegionLength,
+ IN UINT64 Attributes,
+ IN UINT64 BlockEntryMask
+ )
+{
+ UINT32 Type;
+ UINT64 *BlockEntry;
+ UINT64 *LastBlockEntry;
+ UINT64 BlockEntrySize;
+ UINTN TableLevel;
+
+ // Ensure the Length is not zero and is aligned on a 4KB boundary
+ if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
+ ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ do {
+ // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
+ // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
+ BlockEntrySize = RegionLength;
+ BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
+ if (BlockEntry == NULL) {
+ // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the translation tables
+ return RETURN_OUT_OF_RESOURCES;
+ }
+
+ if (TableLevel != 3) {
+ Type = TT_TYPE_BLOCK_ENTRY;
+ } else {
+ Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ }
+
+ do {
+ // Fill the Block Entry with attribute and output block address
+ *BlockEntry &= BlockEntryMask;
+ *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
+
+ // Go to the next BlockEntry
+ RegionStart += BlockEntrySize;
+ RegionLength -= BlockEntrySize;
+ BlockEntry++;
+
+ // Break the inner loop when next block is a table
+ // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
+ if (TableLevel != 3 &&
+ (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
+ break;
+ }
+ } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
+ } while (RegionLength != 0);
+
+ return RETURN_SUCCESS;
+}
+
+STATIC
+RETURN_STATUS
+FillTranslationTable (
+ IN UINT64 *RootTable,
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
+ )
+{
+ return UpdateRegionMapping (
+ RootTable,
+ MemoryRegion->VirtualBase,
+ MemoryRegion->Length,
+ ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
+ 0
+ );
+}
+
+RETURN_STATUS
+SetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ IN EFI_PHYSICAL_ADDRESS VirtualMask
+ )
+{
+ RETURN_STATUS Status;
+ ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;
+ UINT64 *TranslationTable;
+
+ MemoryRegion.PhysicalBase = BaseAddress;
+ MemoryRegion.VirtualBase = BaseAddress;
+ MemoryRegion.Length = Length;
+ MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);
+
+ TranslationTable = ArmGetTTBR0BaseAddress ();
+
+ Status = FillTranslationTable (TranslationTable, &MemoryRegion);
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }
+
+ // Invalidate all TLB entries so changes are synced
+ ArmInvalidateTlb ();
+
+ return RETURN_SUCCESS;
+}
+
+STATIC
+RETURN_STATUS
+SetMemoryRegionAttribute (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ IN UINT64 BlockEntryMask
+ )
+{
+ RETURN_STATUS Status;
+ UINT64 *RootTable;
+
+ RootTable = ArmGetTTBR0BaseAddress ();
+
+ Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }
+
+ // Invalidate all TLB entries so changes are synced
+ ArmInvalidateTlb ();
+
+ return RETURN_SUCCESS;
+}
+
+RETURN_STATUS
+ArmSetMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ UINT64 Val;
+
+ if (ArmReadCurrentEL () == AARCH64_EL1) {
+ Val = TT_PXN_MASK | TT_UXN_MASK;
+ } else {
+ Val = TT_XN_MASK;
+ }
+
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ Val,
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY);
+}
+
+RETURN_STATUS
+ArmClearMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ UINT64 Mask;
+
+ // XN maps to UXN in the EL1&0 translation regime
+ Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
+
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ 0,
+ Mask);
+}
+
+RETURN_STATUS
+ArmSetMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ TT_AP_RO_RO,
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY);
+}
+
+RETURN_STATUS
+ArmClearMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ TT_AP_RW_RW,
+ ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
+}
+
+RETURN_STATUS
+EFIAPI
+ArmConfigureMmu (
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
+ OUT VOID **TranslationTableBase OPTIONAL,
+ OUT UINTN *TranslationTableSize OPTIONAL
+ )
+{
+ VOID* TranslationTable;
+ UINTN TranslationTablePageCount;
+ UINT32 TranslationTableAttribute;
+ ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
+ UINT64 MaxAddress;
+ UINT64 TopAddress;
+ UINTN T0SZ;
+ UINTN RootTableEntryCount;
+ UINT64 TCR;
+ RETURN_STATUS Status;
+
+ if(MemoryTable == NULL) {
+ ASSERT (MemoryTable != NULL);
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ // Identify the highest address of the memory table
+ MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
+ MemoryTableEntry = MemoryTable;
+ while (MemoryTableEntry->Length != 0) {
+ TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
+ if (TopAddress > MaxAddress) {
+ MaxAddress = TopAddress;
+ }
+ MemoryTableEntry++;
+ }
+
+ // Look up T0SZ and the root table entry count for the highest address
+ LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
+
+ //
+ // Set TCR that allows us to retrieve T0SZ in the subsequent functions
+ //
+ // Ideally we will be running at EL2, but should support EL1 as well.
+ // UEFI should not run at EL3.
+ if (ArmReadCurrentEL () == AARCH64_EL2) {
+ // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
+ TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
+
+ // Set the Physical Address Size using MaxAddress
+ if (MaxAddress < SIZE_4GB) {
+ TCR |= TCR_PS_4GB;
+ } else if (MaxAddress < SIZE_64GB) {
+ TCR |= TCR_PS_64GB;
+ } else if (MaxAddress < SIZE_1TB) {
+ TCR |= TCR_PS_1TB;
+ } else if (MaxAddress < SIZE_4TB) {
+ TCR |= TCR_PS_4TB;
+ } else if (MaxAddress < SIZE_16TB) {
+ TCR |= TCR_PS_16TB;
+ } else if (MaxAddress < SIZE_256TB) {
+ TCR |= TCR_PS_256TB;
+ } else {
+ DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
+ ASSERT (0); // Address spaces larger than 48 bits are not supported
+ return RETURN_UNSUPPORTED;
+ }
+ } else if (ArmReadCurrentEL () == AARCH64_EL1) {
+ // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
+ TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;
+
+ // Set the Physical Address Size using MaxAddress
+ if (MaxAddress < SIZE_4GB) {
+ TCR |= TCR_IPS_4GB;
+ } else if (MaxAddress < SIZE_64GB) {
+ TCR |= TCR_IPS_64GB;
+ } else if (MaxAddress < SIZE_1TB) {
+ TCR |= TCR_IPS_1TB;
+ } else if (MaxAddress < SIZE_4TB) {
+ TCR |= TCR_IPS_4TB;
+ } else if (MaxAddress < SIZE_16TB) {
+ TCR |= TCR_IPS_16TB;
+ } else if (MaxAddress < SIZE_256TB) {
+ TCR |= TCR_IPS_256TB;
+ } else {
+ DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
+ ASSERT (0); // Address spaces larger than 48 bits are not supported
+ return RETURN_UNSUPPORTED;
+ }
+ } else {
+ ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
+ return RETURN_UNSUPPORTED;
+ }
+
+ // Set TCR
+ ArmSetTCR (TCR);
+
+ // Allocate pages for translation table
+ TranslationTablePageCount = EFI_SIZE_TO_PAGES(RootTableEntryCount * sizeof(UINT64));
+ TranslationTable = (UINT64*)AllocateAlignedPages (TranslationTablePageCount, TT_ALIGNMENT_DESCRIPTION_TABLE);
+ if (TranslationTable == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ // We set TTBR0 just after allocating the table so that subsequent functions can retrieve
+ // its location without the value having to be passed around. The MMU is only enabled
+ // after the translation tables have been populated.
+ ArmSetTTBR0 (TranslationTable);
+
+ if (TranslationTableBase != NULL) {
+ *TranslationTableBase = TranslationTable;
+ }
+
+ if (TranslationTableSize != NULL) {
+ *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
+ }
+
+ ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
+
+ // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
+ ArmDisableMmu ();
+ ArmDisableDataCache ();
+ ArmDisableInstructionCache ();
+
+ // Make sure nothing sneaked into the cache
+ ArmCleanInvalidateDataCache ();
+ ArmInvalidateInstructionCache ();
+
+ TranslationTableAttribute = TT_ATTR_INDX_INVALID;
+ while (MemoryTable->Length != 0) {
+ // Find the memory attribute for the Translation Table
+ if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
+ ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
+ TranslationTableAttribute = MemoryTable->Attributes;
+ }
+
+ Status = FillTranslationTable (TranslationTable, MemoryTable);
+ if (RETURN_ERROR (Status)) {
+ goto FREE_TRANSLATION_TABLE;
+ }
+ MemoryTable++;
+ }
+
+ // Translate the Memory Attributes into Translation Table Register Attributes
+ if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
+ TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
+ TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
+ TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
+ } else {
+ // If we failed to find a mapping that contains the root translation table then it probably means the translation table
+ // is not mapped in the given memory map.
+ ASSERT (0);
+ Status = RETURN_UNSUPPORTED;
+ goto FREE_TRANSLATION_TABLE;
+ }
+
+ // Set TCR again now that the Translation Table attributes are known
+ ArmSetTCR (TCR);
+
+ ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
+
+ ArmDisableAlignmentCheck ();
+ ArmEnableInstructionCache ();
+ ArmEnableDataCache ();
+
+ ArmEnableMmu ();
+ return RETURN_SUCCESS;
+
+FREE_TRANSLATION_TABLE:
+ FreePages (TranslationTable, TranslationTablePageCount);
+ return Status;
+}
+
+RETURN_STATUS
+EFIAPI
+ArmMmuBaseLibConstructor (
+ VOID
+ )
+{
+ extern UINT32 ArmReplaceLiveTranslationEntrySize;
+
+ //
+ // The ArmReplaceLiveTranslationEntry () helper function may be invoked
+ // with the MMU off so we have to ensure that it gets cleaned to the PoC
+ //
+ WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
+ ArmReplaceLiveTranslationEntrySize);
+
+ return RETURN_SUCCESS;
+}
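
To make the root translation table sizing above concrete, here is a worked example (illustration only, not part of this patch) of what LookupAddresstoRootTable () and GetRootTranslationTableInfo () compute when the highest mapped address is just below 4 GB. Since LookupAddresstoRootTable () is file-local (STATIC), a check like this would have to live in the same translation unit:

STATIC
VOID
RootTableSizingExample (
  VOID
  )
{
  UINTN  T0SZ;
  UINTN  RootTableLevel;
  UINTN  RootTableEntryCount;

  // Highest set bit of 0xFFFFFFFF is bit 31, so TopBit = 32 and
  // T0SZ = 64 - 32 = 32, i.e. a 32-bit (4 GB) input address range.
  LookupAddresstoRootTable (0xFFFFFFFFULL, &T0SZ, &RootTableEntryCount);

  // T0SZ = 32 falls in the level 1 row { 25, 33, 33 } of T0SZPerTableLevel[],
  // so the root table is a level 1 table with 1 << (33 - 32 + 1) = 4 entries:
  // four 1 GB block entries cover the whole 4 GB input address range.
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  ASSERT (T0SZ == 32);
  ASSERT (RootTableLevel == 1);
  ASSERT (RootTableEntryCount == 4);
}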
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
new file mode 100644
index 0000000000..7c5d205d94
--- /dev/null
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
@@ -0,0 +1,76 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2016, Linaro Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLibV8.h>
+
+ .set CTRL_M_BIT, (1 << 0)
+
+ .macro __replace_entry, el
+
+ // disable the MMU
+ mrs x8, sctlr_el\el
+ bic x9, x8, #CTRL_M_BIT
+ msr sctlr_el\el, x9
+ isb
+
+ // write updated entry
+ str x1, [x0]
+
+ // invalidate again to get rid of stale clean cachelines that may
+ // have been filled speculatively since the last invalidate
+ dmb sy
+ dc ivac, x0
+
+ // flush the TLBs
+ .if \el == 1
+ tlbi vmalle1
+ .else
+ tlbi alle\el
+ .endif
+ dsb sy
+
+ // re-enable the MMU
+ msr sctlr_el\el, x8
+ isb
+ .endm
+
+//VOID
+//ArmReplaceLiveTranslationEntry (
+// IN UINT64 *Entry,
+// IN UINT64 Value
+// )
+ASM_PFX(ArmReplaceLiveTranslationEntry):
+
+ // disable interrupts
+ mrs x2, daif
+ msr daifset, #0xf
+ isb
+
+ // clean and invalidate first so that we don't clobber
+ // adjacent entries that are dirty in the caches
+ dc civac, x0
+ dsb ish
+
+ EL1_OR_EL2_OR_EL3(x3)
+1:__replace_entry 1
+ b 4f
+2:__replace_entry 2
+ b 4f
+3:__replace_entry 3
+
+4:msr daif, x2
+ ret
+
+ASM_PFX(ArmReplaceLiveTranslationEntrySize):
+ .long . - ArmReplaceLiveTranslationEntry
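
A note on the trailing .long: ArmReplaceLiveTranslationEntrySize is a data symbol whose contents are the byte size of the routine, which is why the AArch64 constructor above declares it as extern UINT32 and passes its value to WriteBackDataCacheRange (). A minimal sketch of the C-visible declarations (the exact header that provides the prototype is not shown in this diff):

// Break-before-make helper implemented above; it temporarily disables the
// MMU, which is why it must already be clean to the Point of Coherency.
VOID
ArmReplaceLiveTranslationEntry (
  IN UINT64 *Entry,
  IN UINT64 Value
  );

// Data symbol emitted by the .long above; reading it yields the size in
// bytes of ArmReplaceLiveTranslationEntry, so the constructor can write
// back exactly that range.
extern UINT32 ArmReplaceLiveTranslationEntrySize;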
diff --git a/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c
new file mode 100644
index 0000000000..4b6f4ce392
--- /dev/null
+++ b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c
@@ -0,0 +1,452 @@
+/** @file
+* File managing the MMU for ARMv7 architecture
+*
+* Copyright (c) 2011-2016, ARM Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/ArmV7.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PcdLib.h>
+
+#define ID_MMFR0_SHARELVL_SHIFT 12
+#define ID_MMFR0_SHARELVL_MASK 0xf
+#define ID_MMFR0_SHARELVL_ONE 0
+#define ID_MMFR0_SHARELVL_TWO 1
+
+#define ID_MMFR0_INNERSHR_SHIFT 28
+#define ID_MMFR0_INNERSHR_MASK 0xf
+#define ID_MMFR0_OUTERSHR_SHIFT 8
+#define ID_MMFR0_OUTERSHR_MASK 0xf
+
+#define ID_MMFR0_SHR_IMP_UNCACHED 0
+#define ID_MMFR0_SHR_IMP_HW_COHERENT 1
+#define ID_MMFR0_SHR_IGNORED 0xf
+
+UINTN
+EFIAPI
+ArmReadIdMmfr0 (
+ VOID
+ );
+
+BOOLEAN
+EFIAPI
+ArmHasMpExtensions (
+ VOID
+ );
+
+UINT32
+ConvertSectionAttributesToPageAttributes (
+ IN UINT32 SectionAttributes,
+ IN BOOLEAN IsLargePage
+ )
+{
+ UINT32 PageAttributes;
+
+ PageAttributes = 0;
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_CACHE_POLICY (SectionAttributes, IsLargePage);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_AP (SectionAttributes);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_XN (SectionAttributes, IsLargePage);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_NG (SectionAttributes);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_S (SectionAttributes);
+
+ return PageAttributes;
+}
+
+STATIC
+BOOLEAN
+PreferNonshareableMemory (
+ VOID
+ )
+{
+ UINTN Mmfr;
+ UINTN Val;
+
+ if (FeaturePcdGet (PcdNormalMemoryNonshareableOverride)) {
+ return TRUE;
+ }
+
+ //
+ // Check whether the innermost level of shareability (the level we will use
+ // by default to map normal memory) is implemented with hardware coherency
+ // support. Otherwise, revert to mapping as non-shareable.
+ //
+ Mmfr = ArmReadIdMmfr0 ();
+ switch ((Mmfr >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) {
+ case ID_MMFR0_SHARELVL_ONE:
+ // one level of shareability
+ Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK;
+ break;
+ case ID_MMFR0_SHARELVL_TWO:
+ // two levels of shareability
+ Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK;
+ break;
+ default:
+ // unexpected value -> shareable is the safe option
+ ASSERT (FALSE);
+ return FALSE;
+ }
+ return Val != ID_MMFR0_SHR_IMP_HW_COHERENT;
+}
+
+STATIC
+VOID
+PopulateLevel2PageTable (
+ IN UINT32 *SectionEntry,
+ IN UINT32 PhysicalBase,
+ IN UINT32 RemainLength,
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
+ )
+{
+ UINT32* PageEntry;
+ UINT32 Pages;
+ UINT32 Index;
+ UINT32 PageAttributes;
+ UINT32 SectionDescriptor;
+ UINT32 TranslationTable;
+ UINT32 BaseSectionAddress;
+
+ switch (Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_THROUGH;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ PageAttributes = TT_DESCRIPTOR_PAGE_DEVICE;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;
+ break;
+ default:
+ PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;
+ break;
+ }
+
+ if (PreferNonshareableMemory ()) {
+ PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;
+ }
+
+ // Check if the Section Entry has already been populated. Otherwise attach a
+ // Level 2 Translation Table to it
+ if (*SectionEntry != 0) {
+ // The entry must be a page table. Otherwise there is an overlap in the memory map
+ if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(*SectionEntry)) {
+ TranslationTable = *SectionEntry & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK;
+ } else if ((*SectionEntry & TT_DESCRIPTOR_SECTION_TYPE_MASK) == TT_DESCRIPTOR_SECTION_TYPE_SECTION) {
+ // Case where a virtual memory map descriptor overlapped a section entry
+
+ // Allocate a Level2 Page Table for this Section
+ TranslationTable = (UINTN)AllocatePages(EFI_SIZE_TO_PAGES(TRANSLATION_TABLE_PAGE_SIZE + TRANSLATION_TABLE_PAGE_ALIGNMENT));
+ TranslationTable = ((UINTN)TranslationTable + TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK;
+
+ // Translate the Section Descriptor into Page Descriptor
+ SectionDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (*SectionEntry, FALSE);
+
+ BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);
+
+ // Populate the new Level2 Page Table for the section
+ PageEntry = (UINT32*)TranslationTable;
+ for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
+ PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseSectionAddress + (Index << 12)) | SectionDescriptor;
+ }
+
+ // Overwrite the section entry to point to the new Level2 Translation Table
+ *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
+ (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
+ TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
+ } else {
+ // We do not support the other section type (16MB Section)
+ ASSERT(0);
+ return;
+ }
+ } else {
+ TranslationTable = (UINTN)AllocatePages(EFI_SIZE_TO_PAGES(TRANSLATION_TABLE_PAGE_SIZE + TRANSLATION_TABLE_PAGE_ALIGNMENT));
+ TranslationTable = ((UINTN)TranslationTable + TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK;
+
+ ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);
+
+ *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
+ (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
+ TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
+ }
+
+ PageEntry = ((UINT32 *)(TranslationTable) + ((PhysicalBase & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT));
+ Pages = RemainLength / TT_DESCRIPTOR_PAGE_SIZE;
+
+ for (Index = 0; Index < Pages; Index++) {
+ *PageEntry++ = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(PhysicalBase) | PageAttributes;
+ PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;
+ }
+
+}
+
+STATIC
+VOID
+FillTranslationTable (
+ IN UINT32 *TranslationTable,
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
+ )
+{
+ UINT32 *SectionEntry;
+ UINT32 Attributes;
+ UINT32 PhysicalBase;
+ UINT64 RemainLength;
+
+ ASSERT(MemoryRegion->Length > 0);
+
+ if (MemoryRegion->PhysicalBase >= SIZE_4GB) {
+ return;
+ }
+
+ PhysicalBase = MemoryRegion->PhysicalBase;
+ RemainLength = MIN(MemoryRegion->Length, SIZE_4GB - PhysicalBase);
+
+ switch (MemoryRegion->Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ Attributes = TT_DESCRIPTOR_SECTION_DEVICE(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(1);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ Attributes = TT_DESCRIPTOR_SECTION_DEVICE(1);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(1);
+ break;
+ default:
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);
+ break;
+ }
+
+ if (PreferNonshareableMemory ()) {
+ Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
+ }
+
+ // Get the first section entry for this mapping
+ SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS(TranslationTable, MemoryRegion->VirtualBase);
+
+ while (RemainLength != 0) {
+ if (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0) {
+ if (RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) {
+ // Case: Physical address aligned on the Section Size (1MB) && the length is at least the Section Size
+ *SectionEntry++ = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes;
+ PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;
+ } else {
+ // Case: Physical address aligned on the Section Size (1MB) && the length does not fill a section
+ PopulateLevel2PageTable (SectionEntry++, PhysicalBase, RemainLength, MemoryRegion->Attributes);
+
+ // It must be the last entry
+ break;
+ }
+ } else {
+ // Case: Physical address NOT aligned on the Section Size (1MB)
+ PopulateLevel2PageTable (SectionEntry++, PhysicalBase, RemainLength, MemoryRegion->Attributes);
+ // Align the address to the next section boundary
+ PhysicalBase = (PhysicalBase + TT_DESCRIPTOR_SECTION_SIZE) & ~(TT_DESCRIPTOR_SECTION_SIZE-1);
+
+ // If it is the last entry
+ if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) {
+ break;
+ }
+ }
+ RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;
+ }
+}
+
+RETURN_STATUS
+EFIAPI
+ArmConfigureMmu (
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
+ OUT VOID **TranslationTableBase OPTIONAL,
+ OUT UINTN *TranslationTableSize OPTIONAL
+ )
+{
+ VOID* TranslationTable;
+ ARM_MEMORY_REGION_ATTRIBUTES TranslationTableAttribute;
+ UINT32 TTBRAttributes;
+
+ // Allocate pages for translation table.
+ TranslationTable = AllocatePages (EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_SECTION_SIZE + TRANSLATION_TABLE_SECTION_ALIGNMENT));
+ if (TranslationTable == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ TranslationTable = (VOID*)(((UINTN)TranslationTable + TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK);
+
+ if (TranslationTableBase != NULL) {
+ *TranslationTableBase = TranslationTable;
+ }
+
+ if (TranslationTableSize != NULL) {
+ *TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE;
+ }
+
+ ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
+
+ // By default, mark the translation table as belonging to an uncached region
+ TranslationTableAttribute = ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
+ while (MemoryTable->Length != 0) {
+ // Find the memory attribute for the Translation Table
+ if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) && ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
+ TranslationTableAttribute = MemoryTable->Attributes;
+ }
+
+ FillTranslationTable (TranslationTable, MemoryTable);
+ MemoryTable++;
+ }
+
+ // Translate the Memory Attributes into Translation Table Register Attributes
+ if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_NON_CACHEABLE : TTBR_NON_CACHEABLE;
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_BACK_ALLOC : TTBR_WRITE_BACK_ALLOC;
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_THROUGH : TTBR_WRITE_THROUGH;
+ } else {
+ ASSERT (0); // No support has been found for the attributes of the memory region that the translation table belongs to.
+ return RETURN_UNSUPPORTED;
+ }
+
+ if (TTBRAttributes & TTBR_SHAREABLE) {
+ if (PreferNonshareableMemory ()) {
+ TTBRAttributes ^= TTBR_SHAREABLE;
+ } else {
+ //
+ // Unlike the S bit in the short descriptors, which implies inner shareable
+ // on an implementation that supports two levels, the meaning of the S bit
+ // in the TTBR depends on the NOS bit, which defaults to Outer Shareable.
+ // However, we should only set this bit after we have confirmed that the
+ // implementation supports multiple levels, or else the NOS bit is UNK/SBZP
+ //
+ if (((ArmReadIdMmfr0 () >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) != 0) {
+ TTBRAttributes |= TTBR_NOT_OUTER_SHAREABLE;
+ }
+ }
+ }
+
+ ArmCleanInvalidateDataCache ();
+ ArmInvalidateInstructionCache ();
+
+ ArmDisableDataCache ();
+ ArmDisableInstructionCache();
+ // TLBs are also invalidated when calling ArmDisableMmu()
+ ArmDisableMmu ();
+
+ // Make sure nothing sneaked into the cache
+ ArmCleanInvalidateDataCache ();
+ ArmInvalidateInstructionCache ();
+
+ ArmSetTTBR0 ((VOID *)(UINTN)(((UINTN)TranslationTable & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) | (TTBRAttributes & 0x7F)));
+
+ //
+ // The TTBCR register value is undefined at reset in the Non-Secure world.
+ // Writing 0 has the effect of:
+ // Clearing EAE: Use short descriptors, as mandated by specification.
+ // Clearing PD0 and PD1: Translation Table Walk Disable is off.
+ // Clearing N: Perform all translation table walks through TTBR0.
+ // (0 is the default reset value in systems not implementing
+ // the Security Extensions.)
+ //
+ ArmSetTTBCR (0);
+
+ ArmSetDomainAccessControl (DOMAIN_ACCESS_CONTROL_NONE(15) |
+ DOMAIN_ACCESS_CONTROL_NONE(14) |
+ DOMAIN_ACCESS_CONTROL_NONE(13) |
+ DOMAIN_ACCESS_CONTROL_NONE(12) |
+ DOMAIN_ACCESS_CONTROL_NONE(11) |
+ DOMAIN_ACCESS_CONTROL_NONE(10) |
+ DOMAIN_ACCESS_CONTROL_NONE( 9) |
+ DOMAIN_ACCESS_CONTROL_NONE( 8) |
+ DOMAIN_ACCESS_CONTROL_NONE( 7) |
+ DOMAIN_ACCESS_CONTROL_NONE( 6) |
+ DOMAIN_ACCESS_CONTROL_NONE( 5) |
+ DOMAIN_ACCESS_CONTROL_NONE( 4) |
+ DOMAIN_ACCESS_CONTROL_NONE( 3) |
+ DOMAIN_ACCESS_CONTROL_NONE( 2) |
+ DOMAIN_ACCESS_CONTROL_NONE( 1) |
+ DOMAIN_ACCESS_CONTROL_CLIENT(0));
+
+ ArmEnableInstructionCache();
+ ArmEnableDataCache();
+ ArmEnableMmu();
+ return RETURN_SUCCESS;
+}
+
+RETURN_STATUS
+ArmSetMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return RETURN_UNSUPPORTED;
+}
+
+RETURN_STATUS
+ArmClearMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return RETURN_UNSUPPORTED;
+}
+
+RETURN_STATUS
+ArmSetMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return RETURN_UNSUPPORTED;
+}
+
+RETURN_STATUS
+ArmClearMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return RETURN_UNSUPPORTED;
+}
+
+RETURN_STATUS
+EFIAPI
+ArmMmuBaseLibConstructor (
+ VOID
+ )
+{
+ return RETURN_SUCCESS;
+}
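
For the 32-bit code above, a short worked example (hypothetical region, not part of this patch) of how FillTranslationTable () splits a request between 1 MB section entries and level 2 page tables:

// Illustration only: a write-back region at 0x80000000 of length 0x00180000
// (1.5 MB). Because FillTranslationTable () is STATIC, this walk-through is
// descriptive rather than an external call site.
//
//  - 0x80000000 is 1 MB aligned and RemainLength (0x180000) >= 1 MB, so the
//    first loop iteration writes one section descriptor straight into the
//    level 1 table, covering 0x80000000 - 0x800FFFFF.
//  - The remaining 0x80000 bytes no longer fill a section, so the next
//    iteration calls PopulateLevel2PageTable (), which allocates a level 2
//    table and emits 0x80000 / 4 KB = 128 small-page descriptors for
//    0x80100000 - 0x8017FFFF, after which the loop terminates.
STATIC CONST ARM_MEMORY_REGION_DESCRIPTOR mExampleRegion = {
  0x80000000,                             // PhysicalBase
  0x80000000,                             // VirtualBase
  0x00180000,                             // Length
  ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK  // Attributes
};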
diff --git a/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S
new file mode 100644
index 0000000000..c89ee5e40b
--- /dev/null
+++ b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S
@@ -0,0 +1,35 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2016, Linaro Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLib.h>
+
+.text
+.align 2
+
+GCC_ASM_EXPORT (ArmReadIdMmfr0)
+GCC_ASM_EXPORT (ArmHasMpExtensions)
+
+#------------------------------------------------------------------------------
+
+ASM_PFX (ArmHasMpExtensions):
+ mrc p15,0,R0,c0,c0,5
+ // Get Multiprocessing extension (bit31)
+ lsr R0, R0, #31
+ bx LR
+
+ASM_PFX(ArmReadIdMmfr0):
+ mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 Register
+ bx lr
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm
new file mode 100644
index 0000000000..4078394dcd
--- /dev/null
+++ b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm
@@ -0,0 +1,32 @@
+//------------------------------------------------------------------------------
+//
+// Copyright (c) 2016, Linaro Limited. All rights reserved.
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD License
+// which accompanies this distribution. The full text of the license may be found at
+// http://opensource.org/licenses/bsd-license.php
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+//
+//------------------------------------------------------------------------------
+
+
+
+ INCLUDE AsmMacroExport.inc
+
+
+//------------------------------------------------------------------------------
+
+ RVCT_ASM_EXPORT ArmHasMpExtensions
+ mrc p15,0,R0,c0,c0,5
+ // Get Multiprocessing extension (bit31)
+ lsr R0, R0, #31
+ bx LR
+
+ RVCT_ASM_EXPORT ArmReadIdMmfr0
+ mrc p15, 0, r0, c0, c1, 4 ; Read ID_MMFR0 Register
+ bx lr
+
+ END
diff --git a/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf b/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf
new file mode 100644
index 0000000000..1533c2944e
--- /dev/null
+++ b/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf
@@ -0,0 +1,43 @@
+#/** @file
+#
+# Copyright (c) 2016 Linaro Ltd. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = ArmMmuBaseLib
+ FILE_GUID = da8f0232-fb14-42f0-922c-63104d2c70bd
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmMmuLib
+ CONSTRUCTOR = ArmMmuBaseLibConstructor
+
+[Sources.AARCH64]
+ AArch64/ArmMmuLibCore.c
+ AArch64/ArmMmuLibReplaceEntry.S
+
+[Sources.ARM]
+ Arm/ArmMmuLibCore.c
+ Arm/ArmMmuLibV7Support.S |GCC
+ Arm/ArmMmuLibV7Support.asm |RVCT
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ ArmLib
+ CacheMaintenanceLib
+ MemoryAllocationLib
+
+[Pcd.ARM]
+ gArmTokenSpaceGuid.PcdNormalMemoryNonshareableOverride
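
For completeness, a platform consumes this library by resolving the ArmMmuLib library class in its platform description file; the path below matches this patch, while the surrounding .dsc context is hypothetical:

[LibraryClasses.common]
  ArmMmuLib|ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf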