Diffstat (limited to 'payloads/libpayload')
-rw-r--r--  payloads/libpayload/arch/arm64/mmu.c            193
-rw-r--r--  payloads/libpayload/include/arm64/arch/cache.h   16
-rw-r--r--  payloads/libpayload/include/arm64/arch/mmu.h      3
3 files changed, 76 insertions, 136 deletions
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index f0781f5ec1..e2a0cb18b6 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -44,6 +44,12 @@ static uint64_t *xlat_addr;
static int free_idx;
static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)));
+static const char * const tag_to_string[] = {
+ [TYPE_NORMAL_MEM] = "normal",
+ [TYPE_DEV_MEM] = "device",
+ [TYPE_DMA_MEM] = "uncached",
+};
+
/*
* The usedmem_ranges is used to describe all the memory ranges that are
* actually used by payload i.e. _start -> _end in linker script and the
@@ -56,25 +62,12 @@ static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)
*/
static struct mmu_ranges usedmem_ranges;
-static const uint64_t level_to_addr_mask[] = {
- L1_ADDR_MASK,
- L2_ADDR_MASK,
- L3_ADDR_MASK,
-};
-
-static const uint64_t level_to_addr_shift[] = {
- L1_ADDR_SHIFT,
- L2_ADDR_SHIFT,
- L3_ADDR_SHIFT,
-};
-
static void __attribute__((noreturn)) mmu_error(void)
{
halt();
}
-/*
- * Func : get_block_attr
+/* Func : get_block_attr
* Desc : Get block descriptor attributes based on the value of tag in memrange
* region
*/
@@ -107,21 +100,7 @@ static uint64_t get_block_attr(unsigned long tag)
return attr;
}
-/*
- * Func : get_index_from_addr
- * Desc : Get index into table at a given level using appropriate bits from the
- * base address
- */
-static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
-{
- uint64_t mask = level_to_addr_mask[level-1];
- uint8_t shift = level_to_addr_shift[level-1];
-
- return ((addr & mask) >> shift);
-}
-
-/*
- * Func : table_desc_valid
+/* Func : table_desc_valid
* Desc : Check if a table entry contains valid desc
*/
static uint64_t table_desc_valid(uint64_t desc)
@@ -129,29 +108,34 @@ static uint64_t table_desc_valid(uint64_t desc)
return((desc & TABLE_DESC) == TABLE_DESC);
}
-/*
- * Func : get_new_table
- * Desc : Return the next free XLAT table from ttb buffer
+/* Func : setup_new_table
+ * Desc : Get next free table from TTB and set it up to match old parent entry.
*/
-static uint64_t *get_new_table(void)
+static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
- uint64_t *new;
+ uint64_t *new, *entry;
- if (free_idx >= max_tables) {
- printf("ARM64 MMU: No free table\n");
- return NULL;
- }
+ assert(free_idx < max_tables);
new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
free_idx++;
- memset(new, 0, GRANULE_SIZE);
+ if (!desc) {
+ memset(new, 0, GRANULE_SIZE);
+ } else {
+ /* Can reuse old parent entry, but may need to adjust type. */
+ if (xlat_size == L3_XLAT_SIZE)
+ desc |= PAGE_DESC;
+
+ for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
+ entry++, desc += xlat_size)
+ *entry = desc;
+ }
return new;
}
-/*
- * Func : get_table_from_desc
+/* Func : get_table_from_desc
* Desc : Get next level table address from table descriptor
*/
static uint64_t *get_table_from_desc(uint64_t desc)
@@ -160,27 +144,23 @@ static uint64_t *get_table_from_desc(uint64_t desc)
return ptr;
}
-/*
- * Func: get_next_level_table
- * Desc: Check if the table entry is a valid descriptor. If not, allocate new
+/* Func: get_next_level_table
+ * Desc: Check if the table entry is a valid descriptor. If not, initialize new
* table, update the entry and return the table addr. If valid, return the addr.
*/
-static uint64_t *get_next_level_table(uint64_t *ptr)
+static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
uint64_t desc = *ptr;
if (!table_desc_valid(desc)) {
- uint64_t *new_table = get_new_table();
- if (new_table == NULL)
- return NULL;
+ uint64_t *new_table = setup_new_table(desc, xlat_size);
desc = ((uint64_t)new_table) | TABLE_DESC;
*ptr = desc;
}
return get_table_from_desc(desc);
}
-/*
- * Func : init_xlat_table
+/* Func : init_xlat_table
* Desc : Given a base address and size, it identifies the indices within
* different level XLAT tables which map the given base addr. Similar to table
* walk, except that all invalid entries during the walk are updated
@@ -191,18 +171,16 @@ static uint64_t init_xlat_table(uint64_t base_addr,
uint64_t size,
uint64_t tag)
{
- uint64_t l1_index = get_index_from_addr(base_addr,1);
- uint64_t l2_index = get_index_from_addr(base_addr,2);
- uint64_t l3_index = get_index_from_addr(base_addr,3);
+ uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
+ uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
+ uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
uint64_t *table = xlat_addr;
uint64_t desc;
uint64_t attr = get_block_attr(tag);
- /*
- * L1 table lookup
+ /* L1 table lookup
* If VA has bits more than L2 can resolve, lookup starts at L1
- * Assumption: we don't need L0 table in coreboot
- */
+ * Assumption: we don't need L0 table in coreboot */
if (BITS_PER_VA > L1_ADDR_SHIFT) {
if ((size >= L1_XLAT_SIZE) &&
IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
@@ -213,94 +191,71 @@ static uint64_t init_xlat_table(uint64_t base_addr,
table[l1_index] = desc;
/* L2 lookup is not required */
return L1_XLAT_SIZE;
- } else {
- table = get_next_level_table(&table[l1_index]);
- if (!table)
- return 0;
}
+ table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
}
- /*
- * L2 table lookup
+ /* L2 table lookup
* If lookup was performed at L1, L2 table addr is obtained from L1 desc
- * else, lookup starts at ttbr address
- */
+ * else, lookup starts at ttbr address */
if ((size >= L2_XLAT_SIZE) &&
IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
- /*
- * If block address is aligned and size is greater than or equal
- * to size addressed by each L2 entry, we can
- * directly store a block desc
- */
+ /* If block address is aligned and size is greater than
+ * or equal to size addressed by each L2 entry, we can
+ * directly store a block desc */
desc = base_addr | BLOCK_DESC | attr;
table[l2_index] = desc;
/* L3 lookup is not required */
return L2_XLAT_SIZE;
- } else {
- /* L2 entry stores a table descriptor */
- table = get_next_level_table(&table[l2_index]);
- if (!table)
- return 0;
}
+ /* L2 entry stores a table descriptor */
+ table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);
+
/* L3 table lookup */
desc = base_addr | PAGE_DESC | attr;
table[l3_index] = desc;
return L3_XLAT_SIZE;
}
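[Editor's note] The inlined index math above just extracts three different bit fields from the virtual address. A standalone illustration, using assumed 64 KiB-granule shifts (13 index bits per level) rather than the real L*_ADDR_MASK/L*_ADDR_SHIFT macros from mmu.h:

#include <stdint.h>
#include <stdio.h>

#define IDX_BITS	0x1fffULL	/* 13 index bits per level (assumed) */
#define L1_SHIFT	42		/* assumed 64 KiB-granule shifts */
#define L2_SHIFT	29
#define L3_SHIFT	16

int main(void)
{
	uint64_t base_addr = 0x80000000;	/* e.g. start of the device window */

	printf("L1=%llu L2=%llu L3=%llu\n",
	       (unsigned long long)((base_addr >> L1_SHIFT) & IDX_BITS),
	       (unsigned long long)((base_addr >> L2_SHIFT) & IDX_BITS),
	       (unsigned long long)((base_addr >> L3_SHIFT) & IDX_BITS));
	return 0;
}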
-/*
- * Func : sanity_check
- * Desc : Check if the address is aligned and size is atleast the granule size
+/* Func : sanity_check
+ * Desc : Check address/size alignment of a table or page.
*/
-static uint64_t sanity_check(uint64_t addr,
- uint64_t size)
+static void sanity_check(uint64_t addr, uint64_t size)
{
- /* Address should be atleast 64 KiB aligned */
- if (addr & GRANULE_SIZE_MASK)
- return 1;
-
- /* Size should be atleast granule size */
- if (size < GRANULE_SIZE)
- return 1;
-
- return 0;
+ assert(!(addr & GRANULE_SIZE_MASK) &&
+ !(size & GRANULE_SIZE_MASK) &&
+ size >= GRANULE_SIZE);
}
-/*
- * Func : init_mmap_entry
- * Desc : For each mmap entry, this function calls init_xlat_table with the base
+/* Func : mmu_config_range
+ * Desc : This function repeatedly calls init_xlat_table with the base
* address. Based on size returned from init_xlat_table, base_addr is updated
* and subsequent calls are made for initializing the xlat table until the whole
* region is initialized.
*/
-static void init_mmap_entry(struct mmu_memrange *r)
+void mmu_config_range(void *start, size_t size, uint64_t tag)
{
- uint64_t base_addr = r->base;
- uint64_t size = r->size;
- uint64_t tag = r->type;
+ uint64_t base_addr = (uintptr_t)start;
uint64_t temp_size = size;
- while (temp_size) {
- uint64_t ret;
-
- if (sanity_check(base_addr,temp_size)) {
- printf("Libpayload: ARM64 MMU: sanity check failed\n");
- return;
- }
-
- ret = init_xlat_table(base_addr + (size - temp_size),
- temp_size, tag);
+ assert(tag < ARRAY_SIZE(tag_to_string));
+ printf("Libpayload: ARM64 MMU: Mapping address range [%p:%p) as %s\n",
+ start, start + size, tag_to_string[tag]);
+ sanity_check(base_addr, temp_size);
- if (ret == 0)
- return;
+ while (temp_size)
+ temp_size -= init_xlat_table(base_addr + (size - temp_size),
+ temp_size, tag);
- temp_size -= ret;
- }
+ /* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
+ dsb();
+ tlbiall_current();
+ dsb();
+ isb();
}
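[Editor's note] With the range walk exported as mmu_config_range(), a payload can change a region's memory type at runtime. A hypothetical call (the framebuffer address and size are made up for illustration; both must be granule-aligned to pass sanity_check()):

/* Map a hypothetical 8 MiB framebuffer uncached so CPU writes reach the
 * display controller without explicit cache maintenance. */
mmu_config_range((void *)0xb0000000, 8 * 1024 * 1024, TYPE_DMA_MEM);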
-/*
- * Func : mmu_init
+/* Func : mmu_init
* Desc : Initialize mmu based on the mmu_memrange passed. ttb_buffer is used as
* the base address for xlat tables. TTB_DEFAULT_SIZE defines the max number of
* tables that can be used
@@ -308,8 +263,6 @@ static void init_mmap_entry(struct mmu_memrange *r)
*/
uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
{
- struct mmu_memrange devrange = { 0, 0x80000000, TYPE_DEV_MEM };
-
int i = 0;
xlat_addr = (uint64_t *)&ttb_buffer;
@@ -321,11 +274,12 @@ uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
printf("Libpayload ARM64: TTB_BUFFER: 0x%p Max Tables: %d\n",
(void*)xlat_addr, max_tables);
- init_mmap_entry(&devrange);
+ mmu_config_range(NULL, 0x80000000, TYPE_DEV_MEM);
- for (; i < mmu_ranges->used; i++) {
- init_mmap_entry(&mmu_ranges->entries[i]);
- }
+ for (; i < mmu_ranges->used; i++)
+ mmu_config_range((void *)mmu_ranges->entries[i].base,
+ mmu_ranges->entries[i].size,
+ mmu_ranges->entries[i].type);
printf("Libpayload ARM64: MMU init done\n");
return 0;
@@ -388,8 +342,7 @@ void mmu_enable(void)
/* Initialize TTBR */
raw_write_ttbr0_current((uintptr_t)xlat_addr);
- /* Ensure all translation table writes are committed before enabling MMU */
- dsb();
+ /* Ensure system register writes are committed before enabling MMU */
isb();
/* Enable MMU */
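[Editor's note] The dsb() is no longer needed here because mmu_config_range() already issues one after every table update; what remains is the context synchronization for the TTBR write itself. A heavily hedged sketch of the resulting enable sequence, with all helper declarations (and the SCTLR bit) assumed locally for illustration, since only raw_write_ttbr0_current() is visible in this diff:

#include <stdint.h>

extern void raw_write_ttbr0_current(uint64_t val);	/* signature assumed */
extern uint32_t raw_read_sctlr_current(void);		/* assumed helper */
extern void raw_write_sctlr_current(uint32_t val);	/* assumed helper */
extern void isb(void);					/* assumed barrier helper */

#define SKETCH_SCTLR_M	(1 << 0)	/* assumed: MMU-enable bit in SCTLR_ELx */

static void mmu_enable_sketch(uint64_t *xlat_addr)
{
	raw_write_ttbr0_current((uintptr_t)xlat_addr);
	isb();	/* sysreg write must take effect before the MMU is switched on */
	raw_write_sctlr_current(raw_read_sctlr_current() | SKETCH_SCTLR_M);
	isb();	/* subsequent fetches use the new translation regime */
}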
diff --git a/payloads/libpayload/include/arm64/arch/cache.h b/payloads/libpayload/include/arm64/arch/cache.h
index 7248869111..de68cee3f1 100644
--- a/payloads/libpayload/include/arm64/arch/cache.h
+++ b/payloads/libpayload/include/arm64/arch/cache.h
@@ -117,20 +117,4 @@ static inline void icache_invalidate_all(void)
: : : "memory");
}
-/*
- * Generalized setup/init functions
- */
-
-enum dcache_policy {
- DCACHE_OFF,
- DCACHE_WRITEBACK,
- DCACHE_WRITETHROUGH,
-};
-
-/* disable the mmu for a range. Primarily useful to lock out address 0. */
-void mmu_disable_range(unsigned long start_mb, unsigned long size_mb);
-/* mmu range configuration (set dcache policy) */
-void mmu_config_range(unsigned long start_mb, unsigned long size_mb,
- enum dcache_policy policy);
-
#endif /* ARM64_CACHE_H */
diff --git a/payloads/libpayload/include/arm64/arch/mmu.h b/payloads/libpayload/include/arm64/arch/mmu.h
index 79bc783709..30a1973cf7 100644
--- a/payloads/libpayload/include/arm64/arch/mmu.h
+++ b/payloads/libpayload/include/arm64/arch/mmu.h
@@ -175,6 +175,9 @@ void mmu_enable(void);
/* Disable mmu */
void mmu_disable(void);
+/* Change a memory type for a range of bytes at runtime. */
+void mmu_config_range(void *start, size_t size, uint64_t tag);
+
/*
* Based on the memory ranges provided in coreboot tables,
* initialize the mmu_memranges used for mmu initialization
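[Editor's note] For callers migrating off the removed cache.h interface, the MB-granular call maps onto the new byte-granular one roughly as follows; treating DCACHE_OFF as TYPE_DMA_MEM is an assumption for illustration (DCACHE_WRITEBACK would map to TYPE_NORMAL_MEM under the same reading):

/* Before (removed from arch/cache.h): */
mmu_config_range(start_mb, size_mb, DCACHE_OFF);

/* After (arch/mmu.h), converting MiB to bytes: */
mmu_config_range((void *)(start_mb << 20), (size_t)size_mb << 20, TYPE_DMA_MEM);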