Diffstat (limited to 'src')
 src/mem/multi_level_page_table_impl.hh | 36
 src/mem/page_table.cc                  | 63
 src/mem/page_table.hh                  | 43
 3 files changed, 83 insertions(+), 59 deletions(-)
diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh
index 4ff5f5c61..ab7f0663f 100644
--- a/src/mem/multi_level_page_table_impl.hh
+++ b/src/mem/multi_level_page_table_impl.hh
@@ -164,10 +164,10 @@ MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
p.write<PageTableEntry>(PTE_addr, PTE);
DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
- eraseCacheEntry(vaddr);
- updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
- flags & Uncacheable,
- flags & ReadOnly));
+ delete eraseCacheEntry(vaddr);
+ delete updateCache(vaddr, new TlbEntry(pid, vaddr, paddr,
+ flags & Uncacheable,
+ flags & ReadOnly));
}
}
@@ -211,8 +211,9 @@ MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
}
- eraseCacheEntry(vaddr);
- updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr,
+ delete eraseCacheEntry(vaddr);
+ delete updateCache(new_vaddr,
+ new TlbEntry(pid, new_vaddr, paddr,
pTableISAOps.isUncacheable(PTE),
pTableISAOps.isReadOnly(PTE)));
} else {
@@ -243,7 +244,7 @@ MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
p.write<PageTableEntry>(PTE_addr, PTE);
DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
}
- eraseCacheEntry(vaddr);
+ delete eraseCacheEntry(vaddr);
} else {
fatal("Page fault while unmapping");
}
@@ -277,16 +278,16 @@ MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
{
Addr page_addr = pageAlign(vaddr);
- if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
- entry = pTableCache[0].entry;
+ if (pTableCache[0].entry && pTableCache[0].vaddr == page_addr) {
+ entry = *pTableCache[0].entry;
return true;
}
- if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
- entry = pTableCache[1].entry;
+ if (pTableCache[1].entry && pTableCache[1].vaddr == page_addr) {
+ entry = *pTableCache[1].entry;
return true;
}
- if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
- entry = pTableCache[2].entry;
+ if (pTableCache[2].entry && pTableCache[2].vaddr == page_addr) {
+ entry = *pTableCache[2].entry;
return true;
}
@@ -299,10 +300,11 @@ MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
if (pnum == 0)
return false;
- entry = TlbEntry(pid, vaddr, pnum << PageShift,
- pTableISAOps.isUncacheable(PTE),
- pTableISAOps.isReadOnly(PTE));
- updateCache(page_addr, entry);
+ TlbEntry *new_entry = new TlbEntry(pid, vaddr, pnum << PageShift,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE));
+ entry = *new_entry;
+ delete updateCache(page_addr, new_entry);
} else {
return false;
}
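
The bare delete eraseCacheEntry(vaddr) and delete updateCache(...) calls above rely on an ownership convention: the multi-level table's cache owns its heap-allocated entries, and both helpers hand back whichever pointer they displace so the caller can free it. Below is a minimal standalone sketch of that convention for a three-slot cache in the style of pTableCache; all names (Entry, Slot, cache, updateCache) are illustrative stand-ins, not gem5's.

    #include <cstdint>

    // Toy stand-ins for TlbEntry and a pTableCache slot.
    struct Entry { uint64_t vaddr; uint64_t paddr; };

    struct Slot
    {
        uint64_t vaddr = 0;
        Entry *entry = nullptr;  // nullptr doubles as the "invalid" flag
    };

    Slot cache[3];

    // Insert at slot 0, shifting older entries down. The pointer that
    // falls off the end is returned so an owning caller can delete it.
    Entry *
    updateCache(uint64_t vaddr, Entry *entry)
    {
        Entry *evicted = cache[2].entry;
        cache[2] = cache[1];
        cache[1] = cache[0];
        cache[0] = {vaddr, entry};
        return evicted;
    }

    int
    main()
    {
        // Owning cache (the MultiLevelPageTable case): the caller
        // frees whatever updateCache displaces.
        delete updateCache(0x1000, new Entry{0x1000, 0x80000});
        delete updateCache(0x2000, new Entry{0x2000, 0x81000});
        delete updateCache(0x3000, new Entry{0x3000, 0x82000});
        delete updateCache(0x4000, new Entry{0x4000, 0x83000}); // frees 0x1000's

        for (auto &slot : cache)  // drain what the cache still owns
            delete slot.entry;
        return 0;
    }

Deleting a nullptr return is a no-op, which is why the call sites need no valid-slot check.
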
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index 0abe8eaf3..4f06c29e8 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -56,6 +56,8 @@ FuncPageTable::FuncPageTable(const std::string &__name,
FuncPageTable::~FuncPageTable()
{
+ for (auto &iter : pTable)
+ delete iter.second;
}
void
@@ -68,12 +70,20 @@ FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr+ size);
for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
- if (!clobber && (pTable.find(vaddr) != pTable.end())) {
- // already mapped
- fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
+ auto it = pTable.find(vaddr);
+ if (it != pTable.end()) {
+ if (clobber) {
+ delete it->second;
+ } else {
+ // already mapped
+ fatal("FuncPageTable::allocate: addr %#x already mapped",
+ vaddr);
+ }
+ } else {
+ it = pTable.emplace(vaddr, nullptr).first;
}
- pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
+ it->second = new TheISA::TlbEntry(pid, vaddr, paddr,
flags & Uncacheable,
flags & ReadOnly);
eraseCacheEntry(vaddr);
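
The rewritten map() above performs a single hash lookup per page: find(), then either reuse the existing node (clobber) or emplace() a fresh one. The same pattern in isolation, against a plain std::unordered_map, is sketched below; mapPage and Entry are hypothetical names, not gem5 API.

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    struct Entry { uint64_t paddr; };

    std::unordered_map<uint64_t, Entry *> table;

    // Map vaddr -> a new entry; with clobber, free and replace any
    // existing entry, otherwise report the collision to the caller.
    bool
    mapPage(uint64_t vaddr, uint64_t paddr, bool clobber)
    {
        auto it = table.find(vaddr);
        if (it != table.end()) {
            if (!clobber)
                return false;   // already mapped
            delete it->second;  // reuse the node, free the old entry
        } else {
            it = table.emplace(vaddr, nullptr).first;
        }
        it->second = new Entry{paddr};
        return true;
    }

    int
    main()
    {
        mapPage(0x1000, 0x80000, false);
        if (!mapPage(0x1000, 0x81000, false))
            std::printf("collision without clobber\n");
        mapPage(0x1000, 0x81000, true);  // clobber replaces in place

        for (auto &kv : table)           // the map owns its entries
            delete kv.second;
        return 0;
    }
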
@@ -93,13 +103,15 @@ FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
for (; size > 0;
size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
{
- assert(pTable.find(vaddr) != pTable.end());
+    auto old_it = pTable.find(vaddr);
+    assert(old_it != pTable.end() &&
+           pTable.find(new_vaddr) == pTable.end());
-    pTable[new_vaddr] = pTable[vaddr];
-    pTable.erase(vaddr);
+    // Re-key the existing entry pointer instead of copying the entry.
+    auto new_it = pTable.emplace(new_vaddr, old_it->second).first;
+ pTable.erase(old_it);
eraseCacheEntry(vaddr);
- pTable[new_vaddr].updateVaddr(new_vaddr);
- updateCache(new_vaddr, pTable[new_vaddr]);
+ new_it->second->updateVaddr(new_vaddr);
+ updateCache(new_vaddr, new_it->second);
}
}
@@ -107,7 +119,7 @@ void
FuncPageTable::getMappings(std::vector<std::pair<Addr, Addr>> *addr_maps)
{
for (auto &iter : pTable)
- addr_maps->push_back(make_pair(iter.first, iter.second.pageStart()));
+ addr_maps->push_back(make_pair(iter.first, iter.second->pageStart()));
}
void
@@ -118,9 +130,11 @@ FuncPageTable::unmap(Addr vaddr, int64_t size)
DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr+ size);
for (; size > 0; size -= pageSize, vaddr += pageSize) {
- assert(pTable.find(vaddr) != pTable.end());
- pTable.erase(vaddr);
+ auto it = pTable.find(vaddr);
+ assert(it != pTable.end());
eraseCacheEntry(vaddr);
+ delete it->second;
+ pTable.erase(it);
}
}
@@ -145,16 +159,16 @@ FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
Addr page_addr = pageAlign(vaddr);
- if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
- entry = pTableCache[0].entry;
+ if (pTableCache[0].entry && pTableCache[0].vaddr == page_addr) {
+ entry = *pTableCache[0].entry;
return true;
}
- if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
- entry = pTableCache[1].entry;
+ if (pTableCache[1].entry && pTableCache[1].vaddr == page_addr) {
+ entry = *pTableCache[1].entry;
return true;
}
- if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
- entry = pTableCache[2].entry;
+ if (pTableCache[2].entry && pTableCache[2].vaddr == page_addr) {
+ entry = *pTableCache[2].entry;
return true;
}
@@ -165,7 +179,7 @@ FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
}
updateCache(page_addr, iter->second);
- entry = iter->second;
+ entry = *iter->second;
return true;
}
@@ -209,7 +223,7 @@ FuncPageTable::serialize(CheckpointOut &cp) const
ScopedCheckpointSection sec(cp, csprintf("Entry%d", count++));
paramOut(cp, "vaddr", pte.first);
- pte.second.serialize(cp);
+ pte.second->serialize(cp);
}
assert(count == pTable.size());
}
@@ -223,14 +237,13 @@ FuncPageTable::unserialize(CheckpointIn &cp)
for (int i = 0; i < count; ++i) {
ScopedCheckpointSection sec(cp, csprintf("Entry%d", i));
- std::unique_ptr<TheISA::TlbEntry> entry;
- Addr vaddr;
+ TheISA::TlbEntry *entry = new TheISA::TlbEntry();
+ entry->unserialize(cp);
+ Addr vaddr;
paramIn(cp, "vaddr", vaddr);
- entry.reset(new TheISA::TlbEntry());
- entry->unserialize(cp);
- pTable[vaddr] = *entry;
+ pTable[vaddr] = entry;
}
}
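
With pointer values, the map in page_table.cc becomes the single owner of every entry: the destructor walks the table deleting values, and unmap() must delete through the iterator before erasing the node, since erasing first would discard the only pointer to the entry. A condensed sketch of that lifecycle, under the assumption of a simple owning wrapper (OwningTable and its members are illustrative names):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct Entry { uint64_t paddr; };

    class OwningTable
    {
      private:
        std::unordered_map<uint64_t, Entry *> table;

      public:
        ~OwningTable()
        {
            for (auto &kv : table)  // owner frees everything it still holds
                delete kv.second;
        }

        void
        insert(uint64_t vaddr, uint64_t paddr)
        {
            table.emplace(vaddr, new Entry{paddr});
        }

        void
        remove(uint64_t vaddr)
        {
            auto it = table.find(vaddr);
            assert(it != table.end());
            delete it->second;      // free first, while the node is live...
            table.erase(it);        // ...then drop the node itself
        }
    };

    int
    main()
    {
        OwningTable t;
        t.insert(0x1000, 0x80000);
        t.insert(0x2000, 0x81000);
        t.remove(0x1000);
        return 0;                   // destructor frees the 0x2000 entry
    }
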
diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh
index f784b2166..fa584873a 100644
--- a/src/mem/page_table.hh
+++ b/src/mem/page_table.hh
@@ -58,9 +58,8 @@ class PageTableBase : public Serializable
{
protected:
struct cacheElement {
- bool valid;
Addr vaddr;
- TheISA::TlbEntry entry;
+ TheISA::TlbEntry *entry;
};
struct cacheElement pTableCache[3];
@@ -78,9 +77,9 @@ class PageTableBase : public Serializable
pid(_pid), _name(__name)
{
assert(isPowerOf2(pageSize));
- pTableCache[0].valid = false;
- pTableCache[1].valid = false;
- pTableCache[2].valid = false;
+ pTableCache[0].entry = nullptr;
+ pTableCache[1].entry = nullptr;
+ pTableCache[2].entry = nullptr;
}
virtual ~PageTableBase() {};
@@ -162,36 +161,46 @@ class PageTableBase : public Serializable
* Update the page table cache.
* @param vaddr virtual address (page aligned) of the entry to cache
* @param entry page table entry to cache
+ * @return A pointer to any entry which is displaced from the cache.
*/
- inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
+ TheISA::TlbEntry *
+ updateCache(Addr vaddr, TheISA::TlbEntry *entry)
{
+ TheISA::TlbEntry *evicted = pTableCache[2].entry;
+
pTableCache[2].entry = pTableCache[1].entry;
pTableCache[2].vaddr = pTableCache[1].vaddr;
- pTableCache[2].valid = pTableCache[1].valid;
pTableCache[1].entry = pTableCache[0].entry;
pTableCache[1].vaddr = pTableCache[0].vaddr;
- pTableCache[1].valid = pTableCache[0].valid;
pTableCache[0].entry = entry;
pTableCache[0].vaddr = vaddr;
- pTableCache[0].valid = true;
+
+ return evicted;
}
/**
* Erase an entry from the page table cache.
* @param vaddr virtual address (page aligned) to check
+ * @return A pointer to the entry (if any) which is kicked out.
*/
- inline void eraseCacheEntry(Addr vaddr)
+ TheISA::TlbEntry *
+ eraseCacheEntry(Addr vaddr)
{
+ TheISA::TlbEntry *evicted = nullptr;
// Invalidate cached entries if necessary
- if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
- pTableCache[0].valid = false;
- } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
- pTableCache[1].valid = false;
- } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
- pTableCache[2].valid = false;
+ if (pTableCache[0].entry && pTableCache[0].vaddr == vaddr) {
+ evicted = pTableCache[0].entry;
+ pTableCache[0].entry = nullptr;
+ } else if (pTableCache[1].entry && pTableCache[1].vaddr == vaddr) {
+ evicted = pTableCache[1].entry;
+ pTableCache[1].entry = nullptr;
+ } else if (pTableCache[2].entry && pTableCache[2].vaddr == vaddr) {
+ evicted = pTableCache[2].entry;
+ pTableCache[2].entry = nullptr;
}
+ return evicted;
}
virtual void getMappings(std::vector<std::pair<Addr, Addr>>
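
These return values support two different call-site conventions: MultiLevelPageTable owns its cached entries and deletes whatever comes back, while FuncPageTable's cache only aliases pointers that pTable owns, so it discards the return and must invalidate the alias before the owner frees the entry. A sketch of the borrowing case, with a loop standing in for the unrolled three-way if/else above (all names illustrative):

    #include <cstdint>

    struct Entry { uint64_t paddr; };
    struct Slot { uint64_t vaddr = 0; Entry *entry = nullptr; };

    Slot cache[3];

    // Drop any cached alias of vaddr; return it so owning callers can
    // free it, while borrowing callers simply ignore the result.
    Entry *
    eraseCacheEntry(uint64_t vaddr)
    {
        for (auto &slot : cache) {
            if (slot.entry && slot.vaddr == vaddr) {
                Entry *evicted = slot.entry;
                slot.entry = nullptr;
                return evicted;
            }
        }
        return nullptr;
    }

    int
    main()
    {
        Entry *owned = new Entry{0x80000};  // owned elsewhere (the pTable case)
        cache[0] = {0x1000, owned};         // the cache holds only an alias

        eraseCacheEntry(0x1000);            // invalidate the alias first...
        delete owned;                       // ...then the owner frees the entry
        return 0;
    }
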
@@ -204,7 +213,7 @@ class PageTableBase : public Serializable
class FuncPageTable : public PageTableBase
{
private:
- typedef std::unordered_map<Addr, TheISA::TlbEntry> PTable;
+ typedef std::unordered_map<Addr, TheISA::TlbEntry *> PTable;
typedef PTable::iterator PTableItr;
PTable pTable;