-rw-r--r--  Documentation/arch/x86/index.md             2
-rw-r--r--  Documentation/arch/x86/pae.md              15
-rw-r--r--  Documentation/security/memory_clearing.md   4
-rw-r--r--  src/cpu/x86/pae/pgtbl.c                   127
-rw-r--r--  src/include/cpu/x86/pae.h                  25
5 files changed, 173 insertions, 0 deletions
diff --git a/Documentation/arch/x86/index.md b/Documentation/arch/x86/index.md
index 3ecb9803a4..73c982385a 100644
--- a/Documentation/arch/x86/index.md
+++ b/Documentation/arch/x86/index.md
@@ -2,6 +2,8 @@
This section contains documentation about coreboot on x86 architecture.
+* [x86 PAE support](pae.md)
+
## State of x86_64 support
At the moment there's no single board that supports x86_64, or to be exact,
`ARCH_RAMSTAGE_X86_64` and `ARCH_ROMSTAGE_X86_64`.
diff --git a/Documentation/arch/x86/pae.md b/Documentation/arch/x86/pae.md
new file mode 100644
index 0000000000..54cd82f2e4
--- /dev/null
+++ b/Documentation/arch/x86/pae.md
@@ -0,0 +1,15 @@
+# x86_32 PAE documentation
+
+Since x86_64 support is missing, PAE-enabled x86_32 code has to be used.
+The corresponding functions can be found in ``src/cpu/x86/pae/``.
+
+## Memory clearing helper functions
+
+To clear all DRAM on request of the
+[Security API](../../security/memory_clearing.md), the helper function
+`memset_pae` can be used.
+Compared to `memset`, the function has additional requirements, and it has
+more overhead as it uses virtual memory to access memory above 4GiB.
+Memory is cleared in 2MiB chunks, which might take a while.
+
+Make sure to enable caches through MTRRs, otherwise `memset_pae` will be slow!
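+
+A minimal sketch of how `memset_pae` could be called is shown below. The
+function name, the buffer, and the address ranges are hypothetical, and the
+snippet assumes coreboot's `GiB` and `__aligned` helpers are available:
+
+```C
+#include <console/console.h>
+#include <cpu/x86/pae.h>
+
+/* Page table scratch area; must not overlap the range to be cleared. */
+static char pgtbl[MEMSET_PAE_PGTL_SIZE]
+	__aligned(MEMSET_PAE_PGTL_ALIGN);
+
+static void clear_dram_above_4GiB(uint64_t size)
+{
+	/* Use the 2 MiB window at 1 GiB physical as virtual scratch address
+	   and clear `size` bytes starting at 4 GiB physical. */
+	if (memset_pae(4ULL * GiB, 0, size, pgtbl, (void *)(1UL * GiB)))
+		printk(BIOS_ERR, "Clearing DRAM failed\n");
+}
+```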
diff --git a/Documentation/security/memory_clearing.md b/Documentation/security/memory_clearing.md
index 3d985925d9..e5c19256b9 100644
--- a/Documentation/security/memory_clearing.md
+++ b/Documentation/security/memory_clearing.md
@@ -42,3 +42,7 @@ Without MTRRs (and caches enabled) clearing memory takes multiple seconds.
As some platforms place code and stack in DRAM (FSP1.0), the regions can be
skipped.
+
+## Architecture specific implementations
+
+* [x86 PAE](../arch/x86/pae.md)
diff --git a/src/cpu/x86/pae/pgtbl.c b/src/cpu/x86/pae/pgtbl.c
index 9c921342f1..f54a1c35db 100644
--- a/src/cpu/x86/pae/pgtbl.c
+++ b/src/cpu/x86/pae/pgtbl.c
@@ -2,6 +2,8 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2005 Yinghai Lu
+ * Copyright (C) 2019 9elements Agency GmbH
+ * Copyright (C) 2019 Facebook Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,6 +24,7 @@
#include <cpu/x86/pae.h>
#include <string.h>
#include <symbols.h>
+#include <assert.h>
#define PDPTE_PRES (1ULL << 0)
#define PDPTE_ADDR_MASK (~((1ULL << 12) - 1))
@@ -59,9 +62,20 @@
#define PTE_IDX_SHIFT 12
#define PTE_IDX_MASK 0x1ff
+#define OVERLAP(a, b, s, e) ((b) > (s) && (a) < (e))
+
static const size_t s2MiB = 2 * MiB;
static const size_t s4KiB = 4 * KiB;
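+/*
+ * PAE page table entries are 64 bits wide. As this code runs in 32-bit
+ * protected mode, each entry is accessed as two 32-bit halves.
+ */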
+struct pde {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+} __packed;
+struct pg_table {
+ struct pde pd[2048];
+ struct pde pdp[512];
+} __packed;
+
void paging_enable_pae_cr3(uintptr_t cr3)
{
/* Load the page table address */
@@ -101,6 +115,119 @@ void paging_disable_pae(void)
write_cr4(cr4);
}
+/*
+ * Use PAE to map a page and then memset it with the pattern specified.
+ * In order to use PAE, page tables for virtual addressing are set up and
+ * reloaded on a 2MiB boundary. After the function is done, virtual
+ * addressing mode is disabled again. The PATs are set to all cacheable, but
+ * MTRRs still apply.
+ *
+ * Requires scratch memory for the page tables and a virtual address for
+ * non-identity-mapped memory.
+ *
+ * The scratch memory area containing the page tables must not overlap with
+ * the memory range to be cleared.
+ * The scratch memory area containing the page tables must not overlap with
+ * the virtual address for non-identity-mapped memory.
+ *
+ * @param vmem_addr Where the virtual non-identity-mapped page resides; must
+ * be 2 MiB aligned and at least 2 MiB in size.
+ * Content at the physical address is preserved.
+ * @param pgtbl Where the page tables reside; must be 4 KiB aligned and
+ * 20 KiB in size.
+ * Must not overlap the memory range pointed to by dest.
+ * Must not overlap the memory range pointed to by vmem_addr.
+ * Content at the physical address isn't preserved.
+ * @param length The length of the memory segment to memset
+ * @param dest Physical memory address to memset
+ * @param pat The pattern to write to the physical memory
+ * @return 0 on success, 1 on error
+ */
+int memset_pae(uint64_t dest, unsigned char pat, uint64_t length, void *pgtbl,
+ void *vmem_addr)
+{
+ struct pg_table *pgtbl_buf = (struct pg_table *)pgtbl;
+ ssize_t offset;
+
+ printk(BIOS_DEBUG, "%s: Using virtual address %p as scratchpad\n",
+ __func__, vmem_addr);
+ printk(BIOS_DEBUG, "%s: Using address %p for page tables\n",
+ __func__, pgtbl_buf);
+
+ /* Cover some basic error conditions */
+ if (!IS_ALIGNED((uintptr_t)pgtbl_buf, s4KiB) ||
+ !IS_ALIGNED((uintptr_t)vmem_addr, s2MiB)) {
+ printk(BIOS_ERR, "%s: Invalid alignment\n", __func__);
+ return 1;
+ }
+ const uintptr_t pgtbl_s = (uintptr_t)pgtbl_buf;
+ const uintptr_t pgtbl_e = pgtbl_s + sizeof(struct pg_table);
+
+ if (OVERLAP(dest, dest + length, pgtbl_s, pgtbl_e)) {
+ printk(BIOS_ERR, "%s: destination overlaps page tables\n",
+ __func__);
+ return 1;
+ }
+
+ if (OVERLAP((uintptr_t)vmem_addr, (uintptr_t)vmem_addr + s2MiB,
+ pgtbl_s, pgtbl_e)) {
+ printk(BIOS_ERR, "%s: vmem address overlaps page tables\n",
+ __func__);
+ return 1;
+ }
+
+ paging_disable_pae();
+
+ struct pde *pd = pgtbl_buf->pd, *pdp = pgtbl_buf->pdp;
+ /* Point the page directory pointers at the page directories. */
+ memset(pgtbl_buf->pdp, 0, sizeof(pgtbl_buf->pdp));
+
+ pdp[0].addr_lo = ((uintptr_t)&pd[512*0]) | PDPTE_PRES;
+ pdp[1].addr_lo = ((uintptr_t)&pd[512*1]) | PDPTE_PRES;
+ pdp[2].addr_lo = ((uintptr_t)&pd[512*2]) | PDPTE_PRES;
+ pdp[3].addr_lo = ((uintptr_t)&pd[512*3]) | PDPTE_PRES;
+
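+ /* Split dest into a 2 MiB aligned base address and an offset into it. */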
+ offset = dest - ALIGN_DOWN(dest, s2MiB);
+ dest = ALIGN_DOWN(dest, s2MiB);
+
+ /* Identity map the whole 32-bit address space */
+ for (size_t i = 0; i < 2048; i++) {
+ pd[i].addr_lo = (i << PDE_IDX_SHIFT) | PDE_PS | PDE_PRES | PDE_RW;
+ pd[i].addr_hi = 0;
+ }
+
+ /* Get pointer to PD that's not identity mapped */
+ pd = &pgtbl_buf->pd[((uintptr_t)vmem_addr) >> PDE_IDX_SHIFT];
+
+ paging_enable_pae_cr3((uintptr_t)pdp);
+
+ do {
+ const size_t len = MIN(length, s2MiB - offset);
+
+ /*
+ * Map a page using PAE at virtual address vmem_addr.
+ * dest is already 2 MiB aligned.
+ */
+ pd->addr_lo = dest | PDE_PS | PDE_PRES | PDE_RW;
+ pd->addr_hi = dest >> 32;
+
+ /* Flush the TLB entry for the remapped virtual address */
+ asm volatile ("invlpg (%0)" :: "b"(vmem_addr) : "memory");
+
+ printk(BIOS_SPEW, "%s: Clearing %llx[%lx] - %zx\n", __func__,
+ dest + offset, (uintptr_t)vmem_addr + offset, len);
+
+ memset(vmem_addr + offset, pat, len);
+
+ dest += s2MiB;
+ length -= len;
+ offset = 0;
+ } while (length > 0);
+
+ paging_disable_pae();
+
+ return 0;
+}
+
#if ENV_RAMSTAGE
void *map_2M_page(unsigned long page)
{
diff --git a/src/include/cpu/x86/pae.h b/src/include/cpu/x86/pae.h
index 7627187a52..72bae53d68 100644
--- a/src/include/cpu/x86/pae.h
+++ b/src/include/cpu/x86/pae.h
@@ -1,3 +1,19 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2019 9elements Agency GmbH
+ * Copyright (C) 2019 Facebook Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef CPU_X86_PAE_H
#define CPU_X86_PAE_H
@@ -41,4 +57,13 @@ int paging_identity_map_addr(uintptr_t base, size_t size, int pat);
#define MAPPING_ERROR ((void *)0xffffffffUL)
void *map_2M_page(unsigned long page);
+/* To be used with memset_pae */
+#define MEMSET_PAE_VMEM_ALIGN (2 * MiB)
+#define MEMSET_PAE_VMEM_SIZE (2 * MiB)
+#define MEMSET_PAE_PGTL_ALIGN (4 * KiB)
+#define MEMSET_PAE_PGTL_SIZE (20 * KiB)
+
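+/*
+ * Example (hypothetical buffer name; see Documentation/arch/x86/pae.md):
+ * static char pgtbl[MEMSET_PAE_PGTL_SIZE] __aligned(MEMSET_PAE_PGTL_ALIGN);
+ */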
+int memset_pae(uint64_t dest, unsigned char pat, uint64_t length, void *pgtbl,
+ void *vmem_addr);
+
#endif /* CPU_X86_PAE_H */