Diffstat (limited to 'src')
-rw-r--r--  src/mem/multi_level_page_table.cc       |  33
-rw-r--r--  src/mem/multi_level_page_table.hh       | 157
-rw-r--r--  src/mem/multi_level_page_table_impl.hh  | 312
-rw-r--r--  src/mem/page_table.cc                   |  34
-rw-r--r--  src/mem/page_table.hh                   | 100
-rw-r--r--  src/mem/se_translating_port_proxy.hh    |   2
-rw-r--r--  src/sim/process.cc                      |   6
-rw-r--r--  src/sim/process.hh                      |   3
8 files changed, 610 insertions, 37 deletions
diff --git a/src/mem/multi_level_page_table.cc b/src/mem/multi_level_page_table.cc new file mode 100644 index 000000000..3980e72d0 --- /dev/null +++ b/src/mem/multi_level_page_table.cc @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Alexandru Dutu + */ + +#include "mem/multi_level_page_table_impl.hh" + +template class MultiLevelPageTable<TheISA::PageTableOps>; diff --git a/src/mem/multi_level_page_table.hh b/src/mem/multi_level_page_table.hh new file mode 100644 index 000000000..9ae86924d --- /dev/null +++ b/src/mem/multi_level_page_table.hh @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Alexandru Dutu + */ + +/** + * @file + * Declaration of a multi-level page table. + */ + +#ifndef __MEM_MULTI_LEVEL_PAGE_TABLE_HH__ +#define __MEM_MULTI_LEVEL_PAGE_TABLE_HH__ + +#include <string> + +#include "arch/isa_traits.hh" +#include "arch/tlb.hh" +#include "base/types.hh" +#include "config/the_isa.hh" +#include "mem/page_table.hh" +#include "sim/serialize.hh" +#include "sim/system.hh" + +/** + * This class implements an in-memory multi-level page table that can be + * configured to follow ISA specifications. It can be used instead of the + * PageTable class in SE mode to allow CPU models (e.g. X86KvmCPU) + * to do a normal page table walk. + * + * To reduce memory required to store the page table, a multi-level page + * table stores its translations similarly with a radix tree. Let n be + * the number of levels and {Ln, Ln-1, ..., L1, L0} a set that specifies + * the number of entries for each level as base 2 logarithm values. A + * multi-level page table will store its translations at level 0 (the + * leaves of the tree) and it will be layed out in memory in the + * following way: + * + * +------------------------------+ + * level n |Ln-1_E0|Ln-1_E1|...|Ln-1_E2^Ln| + * +------------------------------+ + * / \ + * +------------------------+ +------------------------+ + * level n-1 |Ln-2_E0|...|Ln-2_E2^Ln-1| |Ln-2_E0|...|Ln-2_E2^Ln-1| + * +------------------------+ +------------------------+ + * / \ / \ + * . + * . + * . + * / / \ + * +------------------+ +------------+ +------------+ + * level 1 |L0_E1|...|L0_E2^L1| |...|L0_E2^L1| ... |...|L0_E2^L1| + * +------------------+ +------------+ +------------+ + * , where + * +------------------------------+ + * |Lk-1_E0|Lk-1_E1|...|Lk-1_E2^Lk| + * +------------------------------+ + * is a level k entry that holds 2^Lk entries in Lk-1 level. + * + * Essentially, a level n entry will contain 2^Ln level n-1 entries, + * a level n-1 entry will hold 2^Ln-1 level n-2 entries etc. + * + * The virtual address is split into offsets that index into the + * different levels of the page table. + * + * +--------------------------------+ + * |LnOffset|...|L1Offset|PageOffset| + * +--------------------------------+ + * + * For example L0Offset will be formed by the bits in range + * [log2(PageOffset), log2(PageOffset)+L0]. + * + * For every level of the page table, from n to 1, the base address + * of the entry is loaded, the offset in the virtual address for + * that particular level is used to index into the entry which + * will reveal the memory address of the entry in the next level. 
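(Illustrative sketch for this walk-through only, not part of the patch: assuming an x86-64-style layout of four 9-bit levels over 4 KB pages, and assuming the level sizes are stored leaf-level first, the per-level indices could be peeled off a virtual address as shown below. In gem5 itself this decomposition is delegated to the ISA-specific getOffsets() operation.)

#include <cstdint>
#include <vector>

// Sketch: split a virtual address into per-level table indices.
// logLevelSize[k] = log2(number of entries) at level k, leaf level first.
std::vector<uint64_t>
decomposeVaddr(uint64_t vaddr, const std::vector<uint8_t> &logLevelSize,
               unsigned logPageSize)
{
    std::vector<uint64_t> offsets(logLevelSize.size());
    unsigned shift = logPageSize;                   // skip the page offset bits
    for (size_t lvl = 0; lvl < logLevelSize.size(); ++lvl) {
        uint64_t mask = (1ULL << logLevelSize[lvl]) - 1;
        offsets[lvl] = (vaddr >> shift) & mask;     // index into level lvl
        shift += logLevelSize[lvl];                 // next field sits above
    }
    return offsets;
}

int main()
{
    // Example: hypothetical x86-64-style layout, four 9-bit levels, 4 KB pages.
    std::vector<uint8_t> layout{9, 9, 9, 9};
    auto off = decomposeVaddr(0x00007f1234567000ULL, layout, 12);
    // off[0] is the leaf-level index, off[3] the root-level index.
    return off.empty();
}

With this example layout (8-byte entries, 9-bit levels, 4 KB pages) each block of entries occupies exactly one page, since floorLog2(8) + 9 = 12 = log2(4096); that is why the walk() routine below allocates 1 << (log_req_size - LogVMPageSize) pages, i.e. a single page per newly created level block, when it has to extend the table.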
+ * + * @see MultiLevelPageTable + */ +template <class ISAOps> +class MultiLevelPageTable : public PageTableBase +{ + /** + * ISA specific operations + */ + ISAOps pTableISAOps; + + /** + * Pointer to System object + */ + System *system; + + /** + * Physical address to the last level of the page table + */ + Addr basePtr; + + /** + * Vector with sizes of all levels in base 2 logarithmic + */ + const std::vector<uint8_t> logLevelSize; + + /** + * Number of levels contained by the page table + */ + const uint64_t numLevels; + + /** + * Method for walking the page table + * + * @param vaddr Virtual address that is being looked-up + * @param allocate Specifies whether memory should be allocated while + * walking the page table + * @return PTE_addr The address of the found PTE + * @retval true if the page table walk has succeded, false otherwhise + */ + bool walk(Addr vaddr, bool allocate, Addr &PTE_addr); + +public: + MultiLevelPageTable(const std::string &__name, uint64_t _pid, System *_sys); + ~MultiLevelPageTable(); + + void initState(ThreadContext* tc); + + void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false); + void remap(Addr vaddr, int64_t size, Addr new_vaddr); + void unmap(Addr vaddr, int64_t size); + bool isUnmapped(Addr vaddr, int64_t size); + bool lookup(Addr vaddr, TheISA::TlbEntry &entry); + void serialize(std::ostream &os); + void unserialize(Checkpoint *cp, const std::string §ion); +}; +#endif // __MEM_MULTI_LEVEL_PAGE_TABLE_HH__ diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh new file mode 100644 index 000000000..d944ff04b --- /dev/null +++ b/src/mem/multi_level_page_table_impl.hh @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Alexandru Dutu + */ + +/** + * @file + * Definitions of page table + */ +#include <fstream> +#include <map> +#include <string> + +#include "base/bitfield.hh" +#include "base/intmath.hh" +#include "base/trace.hh" +#include "config/the_isa.hh" +#include "debug/MMU.hh" +#include "mem/multi_level_page_table.hh" +#include "sim/faults.hh" +#include "sim/sim_object.hh" + +using namespace std; +using namespace TheISA; + +template <class ISAOps> +MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name, uint64_t _pid, System *_sys) + : PageTableBase(__name, _pid), system(_sys), + logLevelSize(PageTableLayout), + numLevels(logLevelSize.size()) +{ +} + +template <class ISAOps> +MultiLevelPageTable<ISAOps>::~MultiLevelPageTable() +{ +} + +template <class ISAOps> +void +MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc) +{ + basePtr = pTableISAOps.getBasePtr(tc); + if (basePtr == 0) basePtr++; + DPRINTF(MMU, "basePtr: %d\n", basePtr); + + system->pagePtr = basePtr; + + /* setting first level of the page table */ + uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) + + logLevelSize[numLevels-1]; + assert(log_req_size >= LogVMPageSize); + uint64_t npages = 1 << (log_req_size - LogVMPageSize); + + Addr paddr = system->allocPhysPages(npages); + + PortProxy &p = system->physProxy; + p.memsetBlob(paddr, 0, npages << LogVMPageSize); +} + + +template <class ISAOps> +bool +MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr) +{ + std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr); + + Addr level_base = basePtr; + for (int i = numLevels - 1; i > 0; i--) { + + Addr entry_addr = (level_base<<LogVMPageSize) + + offsets[i] * sizeof(PageTableEntry); + + PortProxy &p = system->physProxy; + PageTableEntry entry = p.read<PageTableEntry>(entry_addr); + + Addr next_entry_pnum = pTableISAOps.getPnum(entry); + if (next_entry_pnum == 0) { + + if (!allocate) return false; + + uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) + + logLevelSize[i-1]; + assert(log_req_size >= LogVMPageSize); + uint64_t npages = 1 << (log_req_size - LogVMPageSize); + + DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n", npages, i-1); + + /* allocate new entry */ + Addr next_entry_paddr = system->allocPhysPages(npages); + p.memsetBlob(next_entry_paddr, 0, npages << LogVMPageSize); + + next_entry_pnum = next_entry_paddr >> LogVMPageSize; + pTableISAOps.setPnum(entry, next_entry_pnum); + pTableISAOps.setPTEFields(entry); + p.write<PageTableEntry>(entry_addr, entry); + + } + DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n", i, level_base, offsets[i], next_entry_pnum); + level_base = next_entry_pnum; + + } + PTE_addr = (level_base<<LogVMPageSize) + + offsets[0] * sizeof(PageTableEntry); + DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr); + return true; +} + +template <class ISAOps> +void +MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr, int64_t size, bool clobber) +{ + // starting address must be page aligned + assert(pageOffset(vaddr) == 0); + + DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size); + + PortProxy &p = system->physProxy; + + for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) { + Addr PTE_addr; + if (walk(vaddr, true, PTE_addr)) { + PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr); + Addr entry_paddr = pTableISAOps.getPnum(PTE); + if (!clobber && entry_paddr == 0) { + pTableISAOps.setPnum(PTE, paddr >> LogVMPageSize); + pTableISAOps.setPTEFields(PTE); + 
p.write<PageTableEntry>(PTE_addr, PTE); + DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr); + } else { + fatal("address 0x%x already mapped to %x", vaddr, entry_paddr); + } + + eraseCacheEntry(vaddr); + updateCache(vaddr, TlbEntry(pid, vaddr, paddr)); + } + + } +} + +template <class ISAOps> +void +MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr) +{ + assert(pageOffset(vaddr) == 0); + assert(pageOffset(new_vaddr) == 0); + + DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr, + new_vaddr, size); + + PortProxy &p = system->physProxy; + + for (; size > 0; size -= pageSize, vaddr += pageSize, new_vaddr += pageSize) { + Addr PTE_addr; + if (walk(vaddr, false, PTE_addr)) { + PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr); + Addr paddr = pTableISAOps.getPnum(PTE); + + if (paddr == 0) { + fatal("Page fault while remapping"); + } else { + /* unmapping vaddr */ + pTableISAOps.setPnum(PTE, 0); + p.write<PageTableEntry>(PTE_addr, PTE); + + /* maping new_vaddr */ + Addr new_PTE_addr; + walk(new_vaddr, true, new_PTE_addr); + PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr); + + pTableISAOps.setPnum(new_PTE, paddr>>LogVMPageSize); + pTableISAOps.setPTEFields(new_PTE); + p.write<PageTableEntry>(new_PTE_addr, new_PTE); + DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr); + } + + eraseCacheEntry(vaddr); + updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr)); + } else { + fatal("Page fault while remapping"); + } + } +} + +template <class ISAOps> +void +MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size) +{ + assert(pageOffset(vaddr) == 0); + + DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr+ size); + + PortProxy &p = system->physProxy; + + for (; size > 0; size -= pageSize, vaddr += pageSize) { + Addr PTE_addr; + if (walk(vaddr, false, PTE_addr)) { + PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr); + Addr paddr = pTableISAOps.getPnum(PTE); + if (paddr == 0) { + fatal("PageTable::allocate: address 0x%x not mapped", vaddr); + } else { + pTableISAOps.setPnum(PTE, 0); + p.write<PageTableEntry>(PTE_addr, PTE); + DPRINTF(MMU, "Unmapping: %#x\n", vaddr); + } + eraseCacheEntry(vaddr); + } else { + fatal("Page fault while unmapping"); + } + } + +} + +template <class ISAOps> +bool +MultiLevelPageTable<ISAOps>::isUnmapped(Addr vaddr, int64_t size) +{ + // starting address must be page aligned + assert(pageOffset(vaddr) == 0); + PortProxy &p = system->physProxy; + + for (; size > 0; size -= pageSize, vaddr += pageSize) { + Addr PTE_addr; + if (walk(vaddr, false, PTE_addr)) { + PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr); + if (pTableISAOps.getPnum(PTE) != 0) + return false; + } + } + + return true; +} + +template <class ISAOps> +bool +MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry) +{ + Addr page_addr = pageAlign(vaddr); + + if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) { + entry = pTableCache[0].entry; + return true; + } + if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) { + entry = pTableCache[1].entry; + return true; + } + if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) { + entry = pTableCache[2].entry; + return true; + } + + DPRINTF(MMU, "lookup page_addr: %#x\n", page_addr); + Addr PTE_addr; + if (walk(page_addr, false, PTE_addr)) { + PortProxy &p = system->physProxy; + PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr); + Addr pnum = pTableISAOps.getPnum(PTE); + if (pnum == 0) + return false; + + entry = TlbEntry(pid, vaddr, pnum << 
LogVMPageSize); + updateCache(page_addr, entry); + } else { + return false; + } + return true; +} + +template <class ISAOps> +void +MultiLevelPageTable<ISAOps>::serialize(std::ostream &os) +{ + /** Since, the page table is stored in system memory + * which is serialized separately, we will serialize + * just the base pointer + */ + paramOut(os, "ptable.pointer", basePtr); +} + +template <class ISAOps> +void +MultiLevelPageTable<ISAOps>::unserialize(Checkpoint *cp, const std::string §ion) +{ + paramIn(cp, section, "ptable.pointer", basePtr); +} diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc index cb7ddfe4b..8770abf98 100644 --- a/src/mem/page_table.cc +++ b/src/mem/page_table.cc @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 Advanced Micro Devices, Inc. * Copyright (c) 2003 The Regents of The University of Michigan * All rights reserved. * @@ -32,7 +33,7 @@ /** * @file - * Definitions of page table. + * Definitions of functional page table. */ #include <fstream> #include <map> @@ -50,22 +51,17 @@ using namespace std; using namespace TheISA; -PageTable::PageTable(const std::string &__name, uint64_t _pid, Addr _pageSize) - : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))), - pid(_pid), _name(__name) +FuncPageTable::FuncPageTable(const std::string &__name, uint64_t _pid, Addr _pageSize) + : PageTableBase(__name, _pid, _pageSize) { - assert(isPowerOf2(pageSize)); - pTableCache[0].valid = false; - pTableCache[1].valid = false; - pTableCache[2].valid = false; } -PageTable::~PageTable() +FuncPageTable::~FuncPageTable() { } void -PageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber) +FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber) { // starting address must be page aligned assert(pageOffset(vaddr) == 0); @@ -75,7 +71,7 @@ PageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber) for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) { if (!clobber && (pTable.find(vaddr) != pTable.end())) { // already mapped - fatal("PageTable::allocate: address 0x%x already mapped", vaddr); + fatal("FuncPageTable::allocate: address 0x%x already mapped", vaddr); } pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr); @@ -85,7 +81,7 @@ PageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber) } void -PageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr) +FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr) { assert(pageOffset(vaddr) == 0); assert(pageOffset(new_vaddr) == 0); @@ -105,7 +101,7 @@ PageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr) } void -PageTable::unmap(Addr vaddr, int64_t size) +FuncPageTable::unmap(Addr vaddr, int64_t size) { assert(pageOffset(vaddr) == 0); @@ -120,7 +116,7 @@ PageTable::unmap(Addr vaddr, int64_t size) } bool -PageTable::isUnmapped(Addr vaddr, int64_t size) +FuncPageTable::isUnmapped(Addr vaddr, int64_t size) { // starting address must be page aligned assert(pageOffset(vaddr) == 0); @@ -135,7 +131,7 @@ PageTable::isUnmapped(Addr vaddr, int64_t size) } bool -PageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry) +FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry) { Addr page_addr = pageAlign(vaddr); @@ -164,7 +160,7 @@ PageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry) } bool -PageTable::translate(Addr vaddr, Addr &paddr) +PageTableBase::translate(Addr vaddr, Addr &paddr) { TheISA::TlbEntry entry; if (!lookup(vaddr, entry)) { @@ -177,7 +173,7 @@ PageTable::translate(Addr vaddr, Addr &paddr) } Fault -PageTable::translate(RequestPtr req) 
+PageTableBase::translate(RequestPtr req) { Addr paddr; assert(pageAlign(req->getVaddr() + req->getSize() - 1) @@ -194,7 +190,7 @@ PageTable::translate(RequestPtr req) } void -PageTable::serialize(std::ostream &os) +FuncPageTable::serialize(std::ostream &os) { paramOut(os, "ptable.size", pTable.size()); @@ -215,7 +211,7 @@ PageTable::serialize(std::ostream &os) } void -PageTable::unserialize(Checkpoint *cp, const std::string §ion) +FuncPageTable::unserialize(Checkpoint *cp, const std::string §ion) { int i = 0, count; paramIn(cp, section, "ptable.size", count); diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh index ce3bfa5e1..010259a72 100644 --- a/src/mem/page_table.hh +++ b/src/mem/page_table.hh @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 Advanced Micro Devices, Inc. * Copyright (c) 2003 The Regents of The University of Michigan * All rights reserved. * @@ -30,7 +31,7 @@ /** * @file - * Declaration of a non-full system Page Table. + * Declarations of a non-full system Page Table. */ #ifndef __MEM_PAGE_TABLE_HH__ @@ -45,17 +46,16 @@ #include "config/the_isa.hh" #include "mem/request.hh" #include "sim/serialize.hh" +#include "sim/system.hh" + +class ThreadContext; /** - * Page Table Declaration. + * Declaration of base class for page table */ -class PageTable +class PageTableBase { protected: - typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable; - typedef PTable::iterator PTableItr; - PTable pTable; - struct cacheElement { bool valid; Addr vaddr; @@ -72,10 +72,20 @@ class PageTable public: - PageTable(const std::string &__name, uint64_t _pid, - Addr _pageSize = TheISA::VMPageSize); + PageTableBase(const std::string &__name, uint64_t _pid, + Addr _pageSize = TheISA::VMPageSize) + : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))), + pid(_pid), _name(__name) + { + assert(isPowerOf2(pageSize)); + pTableCache[0].valid = false; + pTableCache[1].valid = false; + pTableCache[2].valid = false; + } - ~PageTable(); + virtual ~PageTableBase() {}; + + virtual void initState(ThreadContext* tc) = 0; // for DPRINTF compatibility const std::string name() const { return _name; } @@ -83,9 +93,9 @@ class PageTable Addr pageAlign(Addr a) { return (a & ~offsetMask); } Addr pageOffset(Addr a) { return (a & offsetMask); } - void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false); - void remap(Addr vaddr, int64_t size, Addr new_vaddr); - void unmap(Addr vaddr, int64_t size); + virtual void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false) = 0; + virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0; + virtual void unmap(Addr vaddr, int64_t size) = 0; /** * Check if any pages in a region are already allocated @@ -93,14 +103,14 @@ class PageTable * @param size The length of the region. * @return True if no pages in the region are mapped. */ - bool isUnmapped(Addr vaddr, int64_t size); + virtual bool isUnmapped(Addr vaddr, int64_t size) = 0; /** * Lookup function * @param vaddr The virtual address. * @return entry The page table entry corresponding to vaddr. */ - bool lookup(Addr vaddr, TheISA::TlbEntry &entry); + virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0; /** * Translate function @@ -160,9 +170,69 @@ class PageTable } } + virtual void serialize(std::ostream &os) = 0; + + virtual void unserialize(Checkpoint *cp, const std::string §ion) = 0; +}; + +/** + * Declaration of functional page table. 
+ */ +class FuncPageTable : public PageTableBase +{ + private: + typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable; + typedef PTable::iterator PTableItr; + PTable pTable; + + public: + + FuncPageTable(const std::string &__name, uint64_t _pid, + Addr _pageSize = TheISA::VMPageSize); + + ~FuncPageTable(); + + void initState(ThreadContext* tc) + { + } + + void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false); + void remap(Addr vaddr, int64_t size, Addr new_vaddr); + void unmap(Addr vaddr, int64_t size); + + /** + * Check if any pages in a region are already allocated + * @param vaddr The starting virtual address of the region. + * @param size The length of the region. + * @return True if no pages in the region are mapped. + */ + bool isUnmapped(Addr vaddr, int64_t size); + + /** + * Lookup function + * @param vaddr The virtual address. + * @return entry The page table entry corresponding to vaddr. + */ + bool lookup(Addr vaddr, TheISA::TlbEntry &entry); + void serialize(std::ostream &os); void unserialize(Checkpoint *cp, const std::string §ion); }; +/** + * Faux page table class indended to stop the usage of + * an architectural page table, when there is none defined + * for a particular ISA. + */ +class NoArchPageTable : public FuncPageTable +{ + public: + NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys, + Addr _pageSize = TheISA::VMPageSize) : FuncPageTable(__name, _pid) + { + fatal("No architectural page table defined for this ISA.\n"); + } +}; + #endif // __MEM_PAGE_TABLE_HH__ diff --git a/src/mem/se_translating_port_proxy.hh b/src/mem/se_translating_port_proxy.hh index c0e522611..99973a6e9 100644 --- a/src/mem/se_translating_port_proxy.hh +++ b/src/mem/se_translating_port_proxy.hh @@ -75,7 +75,7 @@ class SETranslatingPortProxy : public PortProxy }; private: - PageTable *pTable; + PageTableBase *pTable; Process *process; AllocType allocating; diff --git a/src/sim/process.cc b/src/sim/process.cc index ccaac2096..d9f9a0fe6 100644 --- a/src/sim/process.cc +++ b/src/sim/process.cc @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 Advanced Micro Devices, Inc. * Copyright (c) 2012 ARM Limited * All rights reserved * @@ -55,6 +56,7 @@ #include "config/the_isa.hh" #include "cpu/thread_context.hh" #include "mem/page_table.hh" +#include "mem/multi_level_page_table.hh" #include "mem/se_translating_port_proxy.hh" #include "params/LiveProcess.hh" #include "params/Process.hh" @@ -104,7 +106,7 @@ Process::Process(ProcessParams * params) : SimObject(params), system(params->system), max_stack_size(params->max_stack_size), M5_pid(system->allocatePID()), - pTable(new PageTable(name(), M5_pid)), + pTable(new FuncPageTable(name(), M5_pid)), initVirtMem(system->getSystemPort(), this, SETranslatingPortProxy::Always) { @@ -246,6 +248,8 @@ Process::initState() // mark this context as active so it will start ticking. tc->activate(Cycles(0)); + + pTable->initState(tc); } // map simulator fd sim_fd to target fd tgt_fd diff --git a/src/sim/process.hh b/src/sim/process.hh index be4c53dd9..361e07bca 100644 --- a/src/sim/process.hh +++ b/src/sim/process.hh @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 Advanced Micro Devices, Inc. * Copyright (c) 2001-2005 The Regents of The University of Michigan * All rights reserved. * @@ -124,7 +125,7 @@ class Process : public SimObject //separated. uint64_t M5_pid; - PageTable* pTable; + PageTableBase* pTable; class FdMap { |
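Taken together, the page_table.hh and process.cc changes mean that clients such as Process and SETranslatingPortProxy now program against the abstract PageTableBase interface and no longer care whether translations live in a host-side hash map (FuncPageTable, the old PageTable behaviour) or in simulated physical memory (MultiLevelPageTable). The following standalone sketch mirrors that split under simplifying assumptions (gem5 types replaced by plain integers, only the map/lookup/translate trio kept, 4 KB pages); it is illustration only, not code from the tree.

#include <cassert>
#include <cstdint>
#include <unordered_map>

using Addr = uint64_t;
constexpr Addr kPageSize = 4096;

// Abstract interface: what Process and the translating port proxy rely on.
struct PageTableBaseSketch {
    virtual ~PageTableBaseSketch() = default;
    virtual void map(Addr vaddr, Addr paddr) = 0;
    virtual bool lookup(Addr vaddr, Addr &paddr_page) const = 0;

    // Shared translate(), built on the virtual lookup() as in the patch.
    bool translate(Addr vaddr, Addr &paddr) const {
        Addr page;
        if (!lookup(vaddr & ~(kPageSize - 1), page))
            return false;
        paddr = page | (vaddr & (kPageSize - 1));
        return true;
    }
};

// Functional implementation: translations kept in a host hash map,
// mirroring FuncPageTable.
struct FuncPageTableSketch : PageTableBaseSketch {
    std::unordered_map<Addr, Addr> pTable;      // vaddr page -> paddr page
    void map(Addr vaddr, Addr paddr) override { pTable[vaddr] = paddr; }
    bool lookup(Addr vaddr, Addr &paddr_page) const override {
        auto it = pTable.find(vaddr);
        if (it == pTable.end())
            return false;
        paddr_page = it->second;
        return true;
    }
};

int main()
{
    FuncPageTableSketch func;
    PageTableBaseSketch *pt = &func;            // as Process now stores pTable
    pt->map(0x1000, 0x40000);                   // map one page
    Addr paddr = 0;
    assert(pt->translate(0x1234, paddr) && paddr == 0x40234);
    return 0;
}

An architectural implementation would override the same map() and lookup() hooks but read and write its entries through the system's physical port proxy, which is exactly what MultiLevelPageTable::walk() does in multi_level_page_table_impl.hh above.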