summaryrefslogtreecommitdiff
path: root/src/arch
diff options
context:
space:
mode:
authorJulius Werner <jwerner@chromium.org>2015-09-29 17:28:15 -0700
committerJulius Werner <jwerner@chromium.org>2015-11-11 05:07:48 +0100
commit03a0a6517210b4f53082a499df2a7e743ae7452e (patch)
tree34449594c37f6433f59c3054bde8bbd0358ecc44 /src/arch
parent95b97848cc96682e9574076001bfde23488b27b5 (diff)
downloadcoreboot-03a0a6517210b4f53082a499df2a7e743ae7452e.tar.xz
armv7: mmu: Make fine grained page tables work across stages
Among its other restrictions (which are noted in a comment above the function prototype and stay in place), our makeshift fine-grained page table support for ARM32 has the undocumented feature that it relies on a global bookkeeping variable, causing all sorts of fun surprises when you try to use it from multiple stages during the same boot. This patch redesigns the bookkeeping to stay completely inline in the (persistent) TTB which should resolve the issue.

(This had not been a problem on any of our platforms for now... I just noticed this because I was trying to solve the same issue on ARM64.)

BRANCH=None
BUG=None
TEST=Booted veyron_jerry. Mapped a second fine-grained memory range from romstage, confirmed that it finds the next free spot and leaves the bootblock table in place.

Change-Id: I325866828b4ff251142e1131ce78b571edcc9cf9
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: http://review.coreboot.org/12074
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
Diffstat (limited to 'src/arch')
-rw-r--r--src/arch/arm/armv7/mmu.c26
-rw-r--r--src/arch/arm/include/armv7/arch/cache.h3
2 files changed, 21 insertions, 8 deletions
diff --git a/src/arch/arm/armv7/mmu.c b/src/arch/arm/armv7/mmu.c
index 727da5cf72..8c2f78c4d0 100644
--- a/src/arch/arm/armv7/mmu.c
+++ b/src/arch/arm/armv7/mmu.c
@@ -97,13 +97,17 @@ typedef uint64_t pte_t;
typedef uint32_t pte_t;
#endif /* CONFIG_ARM_LPAE */
+/* We set the first PTE to a sentinel value that cannot occur naturally (has
+ * attributes set but bits [1:0] are 0 -> unmapped) to mark unused subtables. */
+#define ATTR_UNUSED 0xBADbA6E0
+#define SUBTABLE_PTES (1 << (BLOCK_SHIFT - PAGE_SHIFT))
+
/*
* mask/shift/size for pages and blocks
*/
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define BLOCK_SIZE (1UL << BLOCK_SHIFT)
-#define SUBTABLE_SIZE ((1 << (BLOCK_SHIFT - PAGE_SHIFT)) * sizeof(pte_t))
/*
* MAIR Index
@@ -113,7 +117,6 @@ typedef uint32_t pte_t;
#define MAIR_INDX_WB 2
static pte_t *const ttb_buff = (void *)_ttb;
-static int used_tables = 0;
/* Not all boards want to use subtables and declare them in memlayout.ld. This
* outputs two 0x00000000 symbols if they don't, making _ttb_subtables_size 0.
@@ -156,12 +159,17 @@ static void mmu_fill_table(pte_t *table, u32 start_idx, u32 end_idx,
static pte_t *mmu_create_subtable(pte_t *pgd_entry)
{
- if (used_tables >= _ttb_subtables_size / SUBTABLE_SIZE)
- die("Not enough room for another sub-pagetable!");
+ pte_t *table = (pte_t *)_ttb_subtables;
+
+ /* Find unused subtable (first PTE == ATTR_UNUSED). */
+ while (table[0] != ATTR_UNUSED) {
+ table += SUBTABLE_PTES;
+ if ((pte_t *)_ettb_subtables - table <= 0)
+ die("Not enough room for another sub-pagetable!");
+ }
/* We assume that *pgd_entry must already be a valid block mapping. */
uintptr_t start_addr = (uintptr_t)(*pgd_entry & BLOCK_MASK);
- pte_t *table = (void *)(_ttb_subtables + used_tables++ * SUBTABLE_SIZE);
printk(BIOS_DEBUG, "Creating new subtable @%p for [%#.8x:%#.8lx)\n",
table, start_addr, start_addr + BLOCK_SIZE);
@@ -172,8 +180,7 @@ static pte_t *mmu_create_subtable(pte_t *pgd_entry)
attr = ((attr & ~(1 << 4)) | (1 << 0));
if (attr & ATTR_BLOCK)
attr = (attr & ~ATTR_BLOCK) | ATTR_PAGE;
- mmu_fill_table(table, 0, SUBTABLE_SIZE / sizeof(pte_t),
- start_addr, PAGE_SHIFT, attr);
+ mmu_fill_table(table, 0, SUBTABLE_PTES, start_addr, PAGE_SHIFT, attr);
/* Replace old entry in upper level table to point at subtable. */
*pgd_entry = (pte_t)(uintptr_t)table | ATTR_NEXTLEVEL;
@@ -265,6 +272,11 @@ void mmu_config_range(u32 start_mb, u32 size_mb, enum dcache_policy policy)
*/
void mmu_init(void)
{
+ /* Initially mark all subtables as unused (first PTE == ATTR_UNUSED). */
+ pte_t *table = (pte_t *)_ttb_subtables;
+ for (; (pte_t *)_ettb_subtables - table > 0; table += SUBTABLE_PTES)
+ table[0] = ATTR_UNUSED;
+
if (CONFIG_ARM_LPAE) {
pte_t *const pgd_buff = (pte_t*)(_ttb + 16*KiB);
pte_t *pmd = ttb_buff;
diff --git a/src/arch/arm/include/armv7/arch/cache.h b/src/arch/arm/include/armv7/arch/cache.h
index df44c47f21..1e6477768c 100644
--- a/src/arch/arm/include/armv7/arch/cache.h
+++ b/src/arch/arm/include/armv7/arch/cache.h
@@ -384,7 +384,8 @@ void tlb_invalidate_all(void);
* Generalized setup/init functions
*/
-/* mmu initialization (set page table address, set permissions, etc) */
+/* MMU initialization (set page table base, permissions, initialize subtable
+ * buffer, etc.). Must only be called ONCE PER BOOT, before any mappings. */
void mmu_init(void);
enum dcache_policy {