author     Gabe Black <gabeblack@google.com>    2018-01-08 04:41:25 -0800
committer  Gabe Black <gabeblack@google.com>    2018-01-23 20:14:48 +0000
commit     db8c55dede65e07cb9ea8e95c48badd2ea24462f
tree       8b8b4fad738f3ecd3907bb6157517cc0e8a822eb
parent     8cb6bb444a6ee0106807d0a22bbc63323b410bf8
x86, mem: Rewrite the multilevel page table class.
The new version extracts all the x86 specific aspects of the class, and builds the interface around a variable collection of template arguments which are classes that represent the different levels of the page table. The multilevel page table class is now much more ISA independent.

Change-Id: Id42e168a78d0e70f80ab2438480cb6e00a3aa636
Reviewed-on: https://gem5-review.googlesource.com/7347
Reviewed-by: Brandon Potter <Brandon.Potter@amd.com>
Maintainer: Gabe Black <gabeblack@google.com>
-rw-r--r--  src/arch/x86/pagetable.hh               | 147
-rw-r--r--  src/arch/x86/process.cc                 |  34
-rw-r--r--  src/arch/x86/process.hh                 |   2
-rw-r--r--  src/mem/SConscript                      |   2
-rw-r--r--  src/mem/multi_level_page_table.cc       |  33
-rw-r--r--  src/mem/multi_level_page_table.hh       | 206
-rw-r--r--  src/mem/multi_level_page_table_impl.hh  | 231
-rw-r--r--  src/mem/page_table.cc                   |   2
-rw-r--r--  src/mem/page_table.hh                   |   3
-rw-r--r--  src/sim/process.cc                      |   4
10 files changed, 262 insertions(+), 402 deletions(-)
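
Note on the new interface before the diff: where the old class took a single ISAOps template parameter plus a runtime vector describing the size of each level, the rewritten class takes one template argument per page table level, most significant level first. This is the shape of the x86 long mode instantiation added in src/arch/x86/process.cc below:

    typedef MultiLevelPageTable<LongModePTE<47, 39>,
                                LongModePTE<38, 30>,
                                LongModePTE<29, 21>,
                                LongModePTE<20, 12> > ArchPageTable;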
diff --git a/src/arch/x86/pagetable.hh b/src/arch/x86/pagetable.hh
index 490a2591b..f5e1a05f0 100644
--- a/src/arch/x86/pagetable.hh
+++ b/src/arch/x86/pagetable.hh
@@ -63,39 +63,6 @@ typedef Trie<Addr, X86ISA::TlbEntry> TlbEntryTrie;
namespace X86ISA
{
- BitUnion64(VAddr)
- Bitfield<20, 12> longl1;
- Bitfield<29, 21> longl2;
- Bitfield<38, 30> longl3;
- Bitfield<47, 39> longl4;
-
- Bitfield<20, 12> pael1;
- Bitfield<29, 21> pael2;
- Bitfield<31, 30> pael3;
-
- Bitfield<21, 12> norml1;
- Bitfield<31, 22> norml2;
- EndBitUnion(VAddr)
-
- // Unfortunately, the placement of the base field in a page table entry is
- // very erratic and would make a mess here. It might be moved here at some
- // point in the future.
- BitUnion64(PageTableEntry)
- Bitfield<63> nx;
- Bitfield<51, 12> base;
- Bitfield<11, 9> avl;
- Bitfield<8> g;
- Bitfield<7> ps;
- Bitfield<6> d;
- Bitfield<5> a;
- Bitfield<4> pcd;
- Bitfield<3> pwt;
- Bitfield<2> u;
- Bitfield<1> w;
- Bitfield<0> p;
- EndBitUnion(PageTableEntry)
-
-
struct TlbEntry : public Serializable
{
// The base of the physical page.
@@ -152,65 +119,87 @@ namespace X86ISA
void unserialize(CheckpointIn &cp) override;
};
- /** The size of each level of the page table expressed in base 2
- * logarithmic values
- */
- const std::vector<uint8_t> PageTableLayout = {9, 9, 9, 9};
-
- /* x86 specific PTE flags */
- enum PTEField{
- PTE_NotPresent = 1,
- PTE_Supervisor = 2,
- PTE_ReadOnly = 4,
- PTE_Uncacheable = 8,
- };
- /** Page table operations specific to x86 ISA.
- * Indended to be used as parameter of MultiLevelPageTable.
- */
- class PageTableOps
+ BitUnion64(VAddr)
+ Bitfield<20, 12> longl1;
+ Bitfield<29, 21> longl2;
+ Bitfield<38, 30> longl3;
+ Bitfield<47, 39> longl4;
+
+ Bitfield<20, 12> pael1;
+ Bitfield<29, 21> pael2;
+ Bitfield<31, 30> pael3;
+
+ Bitfield<21, 12> norml1;
+ Bitfield<31, 22> norml2;
+ EndBitUnion(VAddr)
+
+ // Unfortunately, the placement of the base field in a page table entry is
+ // very erratic and would make a mess here. It might be moved here at some
+ // point in the future.
+ BitUnion64(PageTableEntry)
+ Bitfield<63> nx;
+ Bitfield<51, 12> base;
+ Bitfield<11, 9> avl;
+ Bitfield<8> g;
+ Bitfield<7> ps;
+ Bitfield<6> d;
+ Bitfield<5> a;
+ Bitfield<4> pcd;
+ Bitfield<3> pwt;
+ Bitfield<2> u;
+ Bitfield<1> w;
+ Bitfield<0> p;
+ EndBitUnion(PageTableEntry)
+
+ template <int first, int last>
+ class LongModePTE
{
public:
- void setPTEFields(PageTableEntry& PTE, uint64_t flags = 0)
- {
- PTE.p = flags & PTE_NotPresent ? 0 : 1;
- PTE.pcd = flags & PTE_Uncacheable ? 1 : 0;
- PTE.w = flags & PTE_ReadOnly ? 0 : 1;
- PTE.u = flags & PTE_Supervisor ? 0 : 1;
- }
+ Addr paddr() { return pte.base << PageShift; }
+ void paddr(Addr addr) { pte.base = addr >> PageShift; }
- /** returns the page number out of a page table entry */
- Addr getPnum(PageTableEntry PTE)
- {
- return PTE.base;
- }
+ bool present() { return pte.p; }
+ void present(bool p) { pte.p = p ? 1 : 0; }
- bool isUncacheable(const PageTableEntry PTE)
- {
- return PTE.pcd;
- }
+ bool uncacheable() { return pte.pcd; }
+ void uncacheable(bool u) { pte.pcd = u ? 1 : 0; }
- bool isReadOnly(PageTableEntry PTE)
- {
- return !PTE.w;
- }
+ bool readonly() { return !pte.w; }
+ void readonly(bool r) { pte.w = r ? 0 : 1; }
- /** sets the page number in a page table entry */
- void setPnum(PageTableEntry& PTE, Addr paddr)
+ void
+ read(PortProxy &p, Addr table, Addr vaddr)
{
- PTE.base = paddr;
+ entryAddr = table;
+ entryAddr += bits(vaddr, first, last) * sizeof(PageTableEntry);
+ pte = p.read<PageTableEntry>(entryAddr);
}
- /** returns the offsets to index in every level of a page
- * table, contained in a virtual address
- */
- std::vector<uint64_t> getOffsets(Addr vaddr)
+ void
+ reset(Addr _paddr, bool _present=true,
+ bool _uncacheable=false, bool _readonly=false)
+ {
+ pte = 0;
+ pte.u = 1;
+ paddr(_paddr);
+ present(_present);
+ uncacheable(_uncacheable);
+ readonly(_readonly);
+ };
+
+ void write(PortProxy &p) { p.write(entryAddr, pte); }
+
+ static int
+ tableSize()
{
- X86ISA::VAddr addr(vaddr);
- return {addr.longl1, addr.longl2, addr.longl3, addr.longl4};
+ return 1 << ((first - last) + 4 - PageShift);
}
- };
+ protected:
+ PageTableEntry pte;
+ Addr entryAddr;
+ };
}
#endif
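
A brief aside on LongModePTE::tableSize() above: it converts the number of index bits a level covers into a page count, assuming (as in the surrounding x86 code) 8-byte PageTableEntry values and PageShift == 12. A minimal standalone sketch of that arithmetic under those assumptions, separate from the gem5 sources:

    // Assumptions mirroring the x86 code above: 4 KiB pages, 8-byte entries.
    constexpr int PageShift = 12;

    template <int first, int last>
    constexpr int tableSize()
    {
        // 2^(first - last + 1) entries, each 2^3 bytes, expressed in pages.
        return 1 << ((first - last) + 4 - PageShift);
    }

    int main()
    {
        // LongModePTE<47, 39> indexes with 9 bits: 512 entries * 8 B = 4 KiB,
        // i.e. exactly one physical page per table at that level.
        static_assert(tableSize<47, 39>() == 1, "one page per long mode level");
        return 0;
    }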
diff --git a/src/arch/x86/process.cc b/src/arch/x86/process.cc
index 43a5273d7..cfec21f39 100644
--- a/src/arch/x86/process.cc
+++ b/src/arch/x86/process.cc
@@ -96,14 +96,21 @@ static const int ArgumentReg32[] = {
static const int NumArgumentRegs32 M5_VAR_USED =
sizeof(ArgumentReg) / sizeof(const int);
+template class MultiLevelPageTable<LongModePTE<47, 39>,
+ LongModePTE<38, 30>,
+ LongModePTE<29, 21>,
+ LongModePTE<20, 12> >;
+typedef MultiLevelPageTable<LongModePTE<47, 39>,
+ LongModePTE<38, 30>,
+ LongModePTE<29, 21>,
+ LongModePTE<20, 12> > ArchPageTable;
+
X86Process::X86Process(ProcessParams *params, ObjectFile *objFile,
SyscallDesc *_syscallDescs, int _numSyscallDescs)
: Process(params, params->useArchPT ?
static_cast<EmulationPageTable *>(
- new ArchPageTable(
- params->name, params->pid,
- params->system, PageBytes,
- PageTableLayout)) :
+ new ArchPageTable(params->name, params->pid,
+ params->system, PageBytes)) :
new EmulationPageTable(params->name, params->pid,
PageBytes),
objFile),
@@ -543,23 +550,22 @@ X86_64Process::initState()
physProxy.writeBlob(pfHandlerPhysAddr, faultBlob, sizeof(faultBlob));
- MultiLevelPageTable<PageTableOps> *pt =
- dynamic_cast<MultiLevelPageTable<PageTableOps> *>(pTable);
-
/* Syscall handler */
- pt->map(syscallCodeVirtAddr, syscallCodePhysAddr, PageBytes, false);
+ pTable->map(syscallCodeVirtAddr, syscallCodePhysAddr,
+ PageBytes, false);
/* GDT */
- pt->map(GDTVirtAddr, gdtPhysAddr, PageBytes, false);
+ pTable->map(GDTVirtAddr, gdtPhysAddr, PageBytes, false);
/* IDT */
- pt->map(IDTVirtAddr, idtPhysAddr, PageBytes, false);
+ pTable->map(IDTVirtAddr, idtPhysAddr, PageBytes, false);
/* TSS */
- pt->map(TSSVirtAddr, tssPhysAddr, PageBytes, false);
+ pTable->map(TSSVirtAddr, tssPhysAddr, PageBytes, false);
/* IST */
- pt->map(ISTVirtAddr, istPhysAddr, PageBytes, false);
+ pTable->map(ISTVirtAddr, istPhysAddr, PageBytes, false);
/* PF handler */
- pt->map(PFHandlerVirtAddr, pfHandlerPhysAddr, PageBytes, false);
+ pTable->map(PFHandlerVirtAddr, pfHandlerPhysAddr, PageBytes, false);
/* MMIO region for m5ops */
- pt->map(MMIORegionVirtAddr, MMIORegionPhysAddr, 16*PageBytes, false);
+ pTable->map(MMIORegionVirtAddr, MMIORegionPhysAddr,
+ 16 * PageBytes, false);
} else {
for (int i = 0; i < contextIds.size(); i++) {
ThreadContext * tc = system->getThreadContext(contextIds[i]);
diff --git a/src/arch/x86/process.hh b/src/arch/x86/process.hh
index e5e18570d..31706cfdd 100644
--- a/src/arch/x86/process.hh
+++ b/src/arch/x86/process.hh
@@ -43,6 +43,7 @@
#include <string>
#include <vector>
+#include "arch/x86/pagetable.hh"
#include "mem/multi_level_page_table.hh"
#include "sim/aux_vector.hh"
#include "sim/process.hh"
@@ -65,7 +66,6 @@ namespace X86ISA
* These page tables are stored in system memory and respect x86
* specification.
*/
- typedef MultiLevelPageTable<PageTableOps> ArchPageTable;
Addr _gdtStart;
Addr _gdtSize;
diff --git a/src/mem/SConscript b/src/mem/SConscript
index 1d3249918..625eb0608 100644
--- a/src/mem/SConscript
+++ b/src/mem/SConscript
@@ -73,8 +73,6 @@ if env['TARGET_ISA'] != 'null':
Source('fs_translating_port_proxy.cc')
Source('se_translating_port_proxy.cc')
Source('page_table.cc')
-if env['TARGET_ISA'] == 'x86':
- Source('multi_level_page_table.cc')
if env['HAVE_DRAMSIM']:
SimObject('DRAMSim2.py')
diff --git a/src/mem/multi_level_page_table.cc b/src/mem/multi_level_page_table.cc
deleted file mode 100644
index 3980e72d0..000000000
--- a/src/mem/multi_level_page_table.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Alexandru Dutu
- */
-
-#include "mem/multi_level_page_table_impl.hh"
-
-template class MultiLevelPageTable<TheISA::PageTableOps>;
diff --git a/src/mem/multi_level_page_table.hh b/src/mem/multi_level_page_table.hh
index 30f534706..bd40d37c1 100644
--- a/src/mem/multi_level_page_table.hh
+++ b/src/mem/multi_level_page_table.hh
@@ -39,7 +39,6 @@
#include <string>
#include "base/types.hh"
-#include "config/the_isa.hh"
#include "mem/page_table.hh"
class System;
@@ -99,13 +98,90 @@ class System;
*
* @see MultiLevelPageTable
*/
-template <class ISAOps>
+
+namespace {
+
+template <class First, class ...Rest>
+Addr
+prepTopTable(System *system, Addr pageSize)
+{
+ Addr addr = system->allocPhysPages(First::tableSize());
+ PortProxy &p = system->physProxy;
+ p.memsetBlob(addr, 0, First::tableSize() * pageSize);
+ return addr;
+}
+
+template <class ...Types>
+struct LastType;
+
+template <class First, class Second, class ...Rest>
+struct LastType<First, Second, Rest...>
+{
+ typedef typename LastType<Second, Rest...>::type type;
+};
+
+template <class Only>
+struct LastType<Only>
+{
+ typedef Only type;
+};
+
+
+template <class ...Types>
+struct WalkWrapper;
+
+template <class Final, class Only>
+struct WalkWrapper<Final, Only>
+{
+ static void
+ walk(System *system, Addr pageSize, Addr table, Addr vaddr,
+ bool allocate, Final *entry)
+ {
+ entry->read(system->physProxy, table, vaddr);
+ }
+};
+
+template <class Final, class First, class Second, class ...Rest>
+struct WalkWrapper<Final, First, Second, Rest...>
+{
+ static void
+ walk(System *system, Addr pageSize, Addr table, Addr vaddr,
+ bool allocate, Final *entry)
+ {
+ First first;
+ first.read(system->physProxy, table, vaddr);
+
+ Addr next;
+ if (!first.present()) {
+ fatal_if(!allocate,
+ "Page fault while walking the page table.");
+ next = prepTopTable<Second>(system, pageSize);
+ first.reset(next);
+ first.write(system->physProxy);
+ } else {
+ next = first.paddr();
+ }
+ WalkWrapper<Final, Second, Rest...>::walk(
+ system, pageSize, next, vaddr, allocate, entry);
+ }
+};
+
+template <class ...EntryTypes>
+void
+walk(System *system, Addr pageSize, Addr table, Addr vaddr,
+ bool allocate, typename LastType<EntryTypes...>::type *entry)
+{
+ WalkWrapper<typename LastType<EntryTypes...>::type, EntryTypes...>::walk(
+ system, pageSize, table, vaddr, allocate, entry);
+}
+
+}
+
+
+template <class ...EntryTypes>
class MultiLevelPageTable : public EmulationPageTable
{
- /**
- * ISA specific operations
- */
- ISAOps pTableISAOps;
+ typedef typename LastType<EntryTypes...>::type Final;
/**
* Pointer to System object
@@ -117,41 +193,99 @@ class MultiLevelPageTable : public EmulationPageTable
*/
Addr _basePtr;
- /**
- * Vector with sizes of all levels in base 2 logarithmic
- */
- const std::vector<uint8_t> logLevelSize;
-
- /**
- * Number of levels contained by the page table
- */
- const uint64_t numLevels;
-
- /**
- * Method for walking the page table
- *
- * @param vaddr Virtual address that is being looked-up
- * @param allocate Specifies whether memory should be allocated while
- * walking the page table
- * @return PTE_addr The address of the found PTE
- */
- void walk(Addr vaddr, bool allocate, Addr &PTE_addr);
-
public:
MultiLevelPageTable(const std::string &__name, uint64_t _pid,
- System *_sys, Addr pageSize,
- const std::vector<uint8_t> &layout);
- ~MultiLevelPageTable();
+ System *_sys, Addr pageSize) :
+ EmulationPageTable(__name, _pid, pageSize), system(_sys)
+ {}
+
+ ~MultiLevelPageTable() {}
- void initState(ThreadContext* tc) override;
+ void
+ initState(ThreadContext* tc) override
+ {
+ _basePtr = prepTopTable<EntryTypes...>(system, pageSize);
+ }
Addr basePtr() { return _basePtr; }
- void map(Addr vaddr, Addr paddr, int64_t size,
- uint64_t flags = 0) override;
- void remap(Addr vaddr, int64_t size, Addr new_vaddr) override;
- void unmap(Addr vaddr, int64_t size) override;
- void serialize(CheckpointOut &cp) const override;
- void unserialize(CheckpointIn &cp) override;
+ void
+ map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags = 0) override
+ {
+ EmulationPageTable::map(vaddr, paddr, size, flags);
+
+ Final entry;
+
+ for (int64_t offset = 0; offset < size; offset += pageSize) {
+ walk<EntryTypes...>(system, pageSize, _basePtr,
+ vaddr + offset, true, &entry);
+
+ entry.reset(paddr + offset, true, flags & Uncacheable,
+ flags & ReadOnly);
+ entry.write(system->physProxy);
+
+ DPRINTF(MMU, "New mapping: %#x-%#x\n",
+ vaddr + offset, paddr + offset);
+ }
+ }
+
+ void
+ remap(Addr vaddr, int64_t size, Addr new_vaddr) override
+ {
+ EmulationPageTable::remap(vaddr, size, new_vaddr);
+
+ Final old_entry, new_entry;
+
+ for (int64_t offset = 0; offset < size; offset += pageSize) {
+ // Unmap the original mapping.
+ walk<EntryTypes...>(system, pageSize, _basePtr, vaddr + offset,
+ false, &old_entry);
+ old_entry.present(false);
+ old_entry.write(system->physProxy);
+
+ // Map the new one.
+ walk<EntryTypes...>(system, pageSize, _basePtr, new_vaddr + offset,
+ true, &new_entry);
+ new_entry.reset(old_entry.paddr(), true, old_entry.uncacheable(),
+ old_entry.readonly());
+ new_entry.write(system->physProxy);
+ }
+ }
+
+ void
+ unmap(Addr vaddr, int64_t size) override
+ {
+ EmulationPageTable::unmap(vaddr, size);
+
+ Final entry;
+
+ for (int64_t offset = 0; offset < size; offset += pageSize) {
+ walk<EntryTypes...>(system, pageSize, _basePtr,
+ vaddr + offset, false, &entry);
+ fatal_if(!entry.present(),
+ "PageTable::unmap: Address %#x not mapped.", vaddr);
+ entry.present(false);
+ entry.write(system->physProxy);
+ DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
+ }
+ }
+
+ void
+ serialize(CheckpointOut &cp) const override
+ {
+ EmulationPageTable::serialize(cp);
+ /** Since, the page table is stored in system memory
+ * which is serialized separately, we will serialize
+ * just the base pointer
+ */
+ paramOut(cp, "ptable.pointer", _basePtr);
+ }
+
+ void
+ unserialize(CheckpointIn &cp) override
+ {
+ EmulationPageTable::unserialize(cp);
+ paramIn(cp, "ptable.pointer", _basePtr);
+ }
};
#endif // __MEM_MULTI_LEVEL_PAGE_TABLE_HH__
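
For readers skimming the new header: map(), remap() and unmap() all funnel through the free walk() helper, whose WalkWrapper recursion peels one entry type off the parameter pack per level, and whose LastType helper names the leaf level's entry type. A minimal standalone sketch of that LastType selection (the level structs here are hypothetical placeholders, not gem5 types):

    #include <type_traits>

    template <class ...Types>
    struct LastType;

    template <class First, class Second, class ...Rest>
    struct LastType<First, Second, Rest...>
    {
        typedef typename LastType<Second, Rest...>::type type;
    };

    template <class Only>
    struct LastType<Only>
    {
        typedef Only type;
    };

    // Hypothetical stand-ins for the per-level entry types.
    struct L4 {}; struct L3 {}; struct L2 {}; struct L1 {};

    // The "Final" type used by MultiLevelPageTable is the last in the pack.
    static_assert(std::is_same<LastType<L4, L3, L2, L1>::type, L1>::value,
                  "the leaf level's entry type is selected");

    int main() { return 0; }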
diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh
deleted file mode 100644
index d756de658..000000000
--- a/src/mem/multi_level_page_table_impl.hh
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Alexandru Dutu
- */
-
-/**
- * @file
- * Definitions of page table
- */
-#include <string>
-
-#include "arch/isa_traits.hh"
-#include "base/trace.hh"
-#include "config/the_isa.hh"
-#include "debug/MMU.hh"
-#include "mem/multi_level_page_table.hh"
-#include "mem/page_table.hh"
-
-using namespace std;
-using namespace TheISA;
-
-template <class ISAOps>
-MultiLevelPageTable<ISAOps>::MultiLevelPageTable(
- const std::string &__name, uint64_t _pid, System *_sys,
- Addr pageSize, const std::vector<uint8_t> &layout)
- : EmulationPageTable(__name, _pid, pageSize), system(_sys),
- logLevelSize(layout), numLevels(logLevelSize.size())
-{
-}
-
-template <class ISAOps>
-MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
-{
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc)
-{
- /* setting first level of the page table */
- uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
- logLevelSize[numLevels - 1];
- assert(log_req_size >= PageShift);
- uint64_t npages = 1 << (log_req_size - PageShift);
-
- Addr _basePtr = system->allocPhysPages(npages);
-
- PortProxy &p = system->physProxy;
- p.memsetBlob(_basePtr, 0, npages << PageShift);
-}
-
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
-{
- std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);
-
- Addr level_base = _basePtr;
- for (int i = numLevels - 1; i > 0; i--) {
-
- Addr entry_addr = (level_base<<PageShift) +
- offsets[i] * sizeof(PageTableEntry);
-
- PortProxy &p = system->physProxy;
- PageTableEntry entry = p.read<PageTableEntry>(entry_addr);
-
- Addr next_entry_pnum = pTableISAOps.getPnum(entry);
- if (next_entry_pnum == 0) {
-
- fatal_if(!allocate, "Page fault while walking the page table.");
-
- uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
- logLevelSize[i - 1];
- assert(log_req_size >= PageShift);
- uint64_t npages = 1 << (log_req_size - PageShift);
-
- DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
- npages, i - 1);
-
- /* allocate new entry */
- Addr next_entry_paddr = system->allocPhysPages(npages);
- p.memsetBlob(next_entry_paddr, 0, npages << PageShift);
-
- next_entry_pnum = next_entry_paddr >> PageShift;
- pTableISAOps.setPnum(entry, next_entry_pnum);
- pTableISAOps.setPTEFields(entry);
- p.write<PageTableEntry>(entry_addr, entry);
-
- }
- DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
- i, level_base, offsets[i], next_entry_pnum);
- level_base = next_entry_pnum;
-
- }
- PTE_addr = (level_base << PageShift) +
- offsets[0] * sizeof(PageTableEntry);
- DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
- int64_t size, uint64_t flags)
-{
- EmulationPageTable::map(vaddr, paddr, size, flags);
-
- PortProxy &p = system->physProxy;
-
- while (size > 0) {
- Addr PTE_addr;
- walk(vaddr, true, PTE_addr);
- PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
- pTableISAOps.setPnum(PTE, paddr >> PageShift);
- uint64_t PTE_flags = 0;
- if (flags & NotPresent)
- PTE_flags |= TheISA::PTE_NotPresent;
- if (flags & Uncacheable)
- PTE_flags |= TheISA::PTE_Uncacheable;
- if (flags & ReadOnly)
- PTE_flags |= TheISA::PTE_ReadOnly;
- pTableISAOps.setPTEFields(PTE, PTE_flags);
- p.write<PageTableEntry>(PTE_addr, PTE);
- DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
- size -= pageSize;
- vaddr += pageSize;
- paddr += pageSize;
- }
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
-{
- EmulationPageTable::remap(vaddr, size, new_vaddr);
-
- PortProxy &p = system->physProxy;
-
- while (size > 0) {
- Addr PTE_addr;
- walk(vaddr, false, PTE_addr);
- PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
- Addr paddr = pTableISAOps.getPnum(PTE);
-
- fatal_if(paddr == 0, "Page fault while remapping");
- /* unmapping vaddr */
- pTableISAOps.setPnum(PTE, 0);
- p.write<PageTableEntry>(PTE_addr, PTE);
-
- /* maping new_vaddr */
- Addr new_PTE_addr;
- walk(new_vaddr, true, new_PTE_addr);
- PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);
-
- pTableISAOps.setPnum(new_PTE, paddr >> PageShift);
- pTableISAOps.setPTEFields(new_PTE);
- p.write<PageTableEntry>(new_PTE_addr, new_PTE);
- DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
- size -= pageSize;
- vaddr += pageSize;
- new_vaddr += pageSize;
- }
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
-{
- EmulationPageTable::unmap(vaddr, size);
-
- PortProxy &p = system->physProxy;
-
- while (size > 0) {
- Addr PTE_addr;
- walk(vaddr, false, PTE_addr);
- PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
- Addr paddr = pTableISAOps.getPnum(PTE);
- fatal_if(paddr == 0,
- "PageTable::allocate: address %#x not mapped", vaddr);
- pTableISAOps.setPnum(PTE, 0);
- p.write<PageTableEntry>(PTE_addr, PTE);
- DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
- size -= pageSize;
- vaddr += pageSize;
- }
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::serialize(CheckpointOut &cp) const
-{
- EmulationPageTable::serialize(cp);
- /** Since, the page table is stored in system memory
- * which is serialized separately, we will serialize
- * just the base pointer
- */
- paramOut(cp, "ptable.pointer", _basePtr);
-}
-
-template <class ISAOps>
-void
-MultiLevelPageTable<ISAOps>::unserialize(CheckpointIn &cp)
-{
- EmulationPageTable::unserialize(cp);
- paramIn(cp, "ptable.pointer", _basePtr);
-}
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index 26cc6e537..ee504196a 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -70,7 +70,7 @@ EmulationPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
delete it->second;
} else {
// already mapped
- fatal("EmulationPageTable::allocate: addr %#x already mapped",
+ panic("EmulationPageTable::allocate: addr %#x already mapped",
vaddr);
}
} else {
diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh
index 470a3e7d6..733cdd2e3 100644
--- a/src/mem/page_table.hh
+++ b/src/mem/page_table.hh
@@ -78,14 +78,11 @@ class EmulationPageTable : public Serializable
/* generic page table mapping flags
* unset | set
* bit 0 - no-clobber | clobber
- * bit 1 - present | not-present
* bit 2 - cacheable | uncacheable
* bit 3 - read-write | read-only
*/
enum MappingFlags : uint32_t {
- Zero = 0,
Clobber = 1,
- NotPresent = 2,
Uncacheable = 4,
ReadOnly = 8,
};
diff --git a/src/sim/process.cc b/src/sim/process.cc
index 07c936e76..5e9c2b5e7 100644
--- a/src/sim/process.cc
+++ b/src/sim/process.cc
@@ -311,7 +311,7 @@ Process::allocateMem(Addr vaddr, int64_t size, bool clobber)
Addr paddr = system->allocPhysPages(npages);
pTable->map(vaddr, paddr, size,
clobber ? EmulationPageTable::Clobber :
- EmulationPageTable::Zero);
+ EmulationPageTable::MappingFlags(0));
}
void
@@ -406,7 +406,7 @@ bool
Process::map(Addr vaddr, Addr paddr, int size, bool cacheable)
{
pTable->map(vaddr, paddr, size,
- cacheable ? EmulationPageTable::Zero :
+ cacheable ? EmulationPageTable::MappingFlags(0) :
EmulationPageTable::Uncacheable);
return true;
}