Diffstat (limited to 'payloads')
-rw-r--r--  payloads/libpayload/arch/arm64/mmu.c          | 29
-rw-r--r--  payloads/libpayload/include/arm64/arch/mmu.h  | 59
-rw-r--r--  payloads/libpayload/include/stdlib.h          |  1
3 files changed, 40 insertions(+), 49 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index 204412efd5..f0781f5ec1 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -200,12 +200,24 @@ static uint64_t init_xlat_table(uint64_t base_addr,
/*
* L1 table lookup
- * If VA has bits more than 41, lookup starts at L1
+ * If VA has bits more than L2 can resolve, lookup starts at L1
+ * Assumption: we don't need L0 table in coreboot
*/
- if (l1_index) {
- table = get_next_level_table(&table[l1_index]);
- if (!table)
- return 0;
+ if (BITS_PER_VA > L1_ADDR_SHIFT) {
+ if ((size >= L1_XLAT_SIZE) &&
+ IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
+ /* If block address is aligned and size is greater than
+ * or equal to size addressed by each L1 entry, we can
+ * directly store a block desc */
+ desc = base_addr | BLOCK_DESC | attr;
+ table[l1_index] = desc;
+ /* L2 lookup is not required */
+ return L1_XLAT_SIZE;
+ } else {
+ table = get_next_level_table(&table[l1_index]);
+ if (!table)
+ return 0;
+ }
}
/*
@@ -213,10 +225,11 @@ static uint64_t init_xlat_table(uint64_t base_addr,
* If lookup was performed at L1, L2 table addr is obtained from L1 desc
* else, lookup starts at ttbr address
*/
- if (!l3_index && (size >= L2_XLAT_SIZE)) {
+ if ((size >= L2_XLAT_SIZE) &&
+ IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
/*
* If block address is aligned and size is greater than or equal
- * to 512MiB i.e. size addressed by each L2 entry, we can
+ * to size addressed by each L2 entry, we can
* directly store a block desc
*/
desc = base_addr | BLOCK_DESC | attr;
@@ -369,7 +382,7 @@ void mmu_enable(void)
/* Initialize TCR flags */
raw_write_tcr_current(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
- TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB |
+ TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
TCR_TBI_USED);
/* Initialize TTBR */
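
Note: the new fast path above stores a block descriptor directly at L1 whenever the range is large enough and suitably aligned, instead of always descending to an L2 table; the same pattern is used at L2 further down. A standalone sketch of that decision, assuming the 4KB-granule macros introduced in mmu.h below (can_use_l1_block() is a hypothetical helper name, not part of this patch):

#include <stdint.h>

#define GRANULE_SIZE_SHIFT	12
#define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)		/* 9 */
#define L1_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
#define L1_XLAT_SIZE		(1UL << L1_ADDR_SHIFT)			/* 1GB */
#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1UL)) == 0)

/* Returns 1 when a single L1 entry can map the range as one block
 * descriptor: the range covers at least 1GB (the size addressed by an L1
 * entry with a 4KB granule) and the base sits on a 1GB boundary.
 * Otherwise the walk continues into an L2 table. */
static int can_use_l1_block(uint64_t base_addr, uint64_t size)
{
	return (size >= L1_XLAT_SIZE) &&
	       IS_ALIGNED(base_addr, 1UL << L1_ADDR_SHIFT);
}
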
diff --git a/payloads/libpayload/include/arm64/arch/mmu.h b/payloads/libpayload/include/arm64/arch/mmu.h
index e241d7593f..79bc783709 100644
--- a/payloads/libpayload/include/arm64/arch/mmu.h
+++ b/payloads/libpayload/include/arm64/arch/mmu.h
@@ -49,14 +49,6 @@ struct mmu_ranges {
*/
extern char _start[], _end[];
-/* IMPORTANT!!!!!!!
- * Assumptions made:
- * Granule size is 64KiB
- * BITS per Virtual address is 33
- * All the calculations for tables L1,L2 and L3 are based on these assumptions
- * If these values are changed, recalculate the other macros as well
- */
-
/* Memory attributes for mmap regions
* These attributes act as tag values for memrange regions
*/
@@ -89,47 +81,32 @@ extern char _start[], _end[];
/* XLAT Table Init Attributes */
#define VA_START 0x0
-/* If BITS_PER_VA or GRANULE_SIZE are changed, recalculate and change the
- macros following them */
#define BITS_PER_VA 33
-/* Granule size of 64KB is being used */
#define MIN_64_BIT_ADDR (1UL << 32)
-#define XLAT_TABLE_MASK ~(0xffffUL)
-#define GRANULE_SIZE_SHIFT 16
+/* Granule size of 4KB is being used */
+#define GRANULE_SIZE_SHIFT 12
#define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
-#define GRANULE_SIZE_MASK ((1 << 16) - 1)
-
-#define L1_ADDR_SHIFT 42
-#define L2_ADDR_SHIFT 29
-#define L3_ADDR_SHIFT 16
-
-#define L1_ADDR_MASK (0UL << L1_ADDR_SHIFT)
-#define L2_ADDR_MASK (0xfUL << L2_ADDR_SHIFT)
-#define L3_ADDR_MASK (0x1fffUL << L3_ADDR_SHIFT)
-
-/* Dependent on BITS_PER_VA and GRANULE_SIZE */
-#define INIT_LEVEL 2
-#define XLAT_MAX_LEVEL 3
-
-/* Each entry in XLAT table is 8 bytes */
-#define XLAT_ENTRY_SHIFT 3
-#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SHIFT)
+#define XLAT_TABLE_MASK (~(0UL) << GRANULE_SIZE_SHIFT)
+#define GRANULE_SIZE_MASK ((1 << GRANULE_SIZE_SHIFT) - 1)
-#define XLAT_TABLE_SHIFT GRANULE_SIZE_SHIFT
-#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SHIFT)
+#define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)
+#define L1_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
+#define L2_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
+#define L3_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)
-#define XLAT_NUM_ENTRIES_SHIFT (XLAT_TABLE_SHIFT - XLAT_ENTRY_SHIFT)
-#define XLAT_NUM_ENTRIES (1 << XLAT_NUM_ENTRIES_SHIFT)
+#if BITS_PER_VA > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL
+ #error "BITS_PER_VA too large (we don't have L0 table support)"
+#endif
-#define L3_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT)
-#define L2_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
-#define L1_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
+#define L1_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
+#define L2_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
+#define L3_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)
/* These macros give the size of the region addressed by each entry of a xlat
table at any given level */
-#define L3_XLAT_SIZE (1 << L3_XLAT_SIZE_SHIFT)
-#define L2_XLAT_SIZE (1 << L2_XLAT_SIZE_SHIFT)
-#define L1_XLAT_SIZE (1 << L1_XLAT_SIZE_SHIFT)
+#define L3_XLAT_SIZE (1UL << L3_ADDR_SHIFT)
+#define L2_XLAT_SIZE (1UL << L2_ADDR_SHIFT)
+#define L1_XLAT_SIZE (1UL << L1_ADDR_SHIFT)
/* Block indices required for MAIR */
#define BLOCK_INDEX_MEM_DEV_NGNRNE 0
@@ -184,7 +161,7 @@ extern char _start[], _end[];
#define TCR_TBI_USED (0x0 << TCR_TBI_SHIFT)
#define TCR_TBI_IGNORED (0x1 << TCR_TBI_SHIFT)
-#define DMA_DEFAULT_SIZE (0x200 * GRANULE_SIZE)
+#define DMA_DEFAULT_SIZE (32 * MiB)
#define TTB_DEFAULT_SIZE 0x100000
#define MB_SIZE (1UL << 20)
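
Note: as a worked check of the arithmetic behind the new macros above: with a 4KB granule each table entry resolves GRANULE_SIZE_SHIFT - 3 = 9 bits (512 eight-byte entries per 4KB table), so the address shifts come out to 30/21/12 and the region covered per entry to 1GB/2MB/4KB. A standalone sketch under those assumptions, not part of the patch:

#define GRANULE_SIZE_SHIFT	12
#define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)			/* 9 */
#define L1_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)	/* 30 */
#define L2_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)	/* 21 */
#define L3_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)	/* 12 */

_Static_assert((1UL << L3_ADDR_SHIFT) == 4UL * 1024, "L3 entry maps 4KB");
_Static_assert((1UL << L2_ADDR_SHIFT) == 2UL * 1024 * 1024, "L2 entry maps 2MB");
_Static_assert((1UL << L1_ADDR_SHIFT) == 1UL * 1024 * 1024 * 1024, "L1 entry maps 1GB");
/* With BITS_PER_VA == 33 the L1 index covers VA bits [32:30] (8 entries in
 * use), so no L0 table is needed and the #if guard above never fires. */
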
diff --git a/payloads/libpayload/include/stdlib.h b/payloads/libpayload/include/stdlib.h
index deb5e3dac4..08c7d06c3e 100644
--- a/payloads/libpayload/include/stdlib.h
+++ b/payloads/libpayload/include/stdlib.h
@@ -39,6 +39,7 @@
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define ALIGN_UP(x,a) ALIGN((x),(a))
#define ALIGN_DOWN(x,a) ((x) & ~((typeof(x))(a)-1UL))
+#define IS_ALIGNED(x,a) (((x) & ((typeof(x))(a)-1UL)) == 0)
/**
* @defgroup malloc Memory allocation functions
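
Note: the IS_ALIGNED() helper added here mirrors ALIGN_UP/ALIGN_DOWN above and, like them, assumes a power-of-two alignment (typeof() is the same GCC extension those macros already rely on). A small standalone usage sketch with illustrative values, not taken from the patch:

#include <stdio.h>

#define IS_ALIGNED(x,a)	(((x) & ((typeof(x))(a)-1UL)) == 0)

int main(void)
{
	/* 0x40000000 sits on a 1GB boundary; 0x40001000 does not. */
	printf("%d\n", IS_ALIGNED(0x40000000UL, 1UL << 30));	/* prints 1 */
	printf("%d\n", IS_ALIGNED(0x40001000UL, 1UL << 30));	/* prints 0 */
	return 0;
}
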