summaryrefslogtreecommitdiff
path: root/src/mem/page_table.cc
diff options
context:
space:
mode:
authorGabe Black <gabeblack@google.com>2017-12-22 19:21:30 -0800
committerGabe Black <gabeblack@google.com>2018-01-15 22:36:41 +0000
commitb8b13206c8d45cdcc16157d137845706bae915dd (patch)
tree646ceec50e38c2dd41614bd2ccf05e91adc0213b /src/mem/page_table.cc
parent3e8d76e2e23ab23d68e569e9b2c4498a06c19f59 (diff)
downloadgem5-b8b13206c8d45cdcc16157d137845706bae915dd.tar.xz
mem: Track TLB entries in the lookup cache as pointers.
Using the architectural page table on x86 and the functional page table on ARM, both with the twolf benchmark in SE mode, there was no performance penalty for doing so, and again possibly a performance improvement. By using a pointer instead of an inline instance, it's possible for the actual type of the TLB entry to be hidden somewhat, taking a step towards abstracting away another aspect of the ISAs. Since the TLB entries are no longer overwritten and now need to be allocated and freed, this change introduces return types from the updateCache and eraseCacheEntry functions. These functions will return the pointer to any entry which has been displaced from the cache which the caller can either free or ignore, depending on whether the entry has a purpose outside of the cache. Because the functional page table stores its entries over a longer time period, it will generally not delete the pointer returned from those functions. The "architechtural" page table, ie the one which is backed by memory, doesn't have any other use for the TlbEntrys and will delete them. That leads to more news and deletes than there used to be. To address that, and also to speed up the architectural page table in general, it would be a good idea to augment the functional page table with an image of the table in memory, instead of replacing it with one. The functional page table would provide quick lookups and also avoid having to translate page table entries to TLB entries, making performance essentially equivalent to the functional case. The backing page tables, which are primarily for consumption by the physical hardware when in KVM, can be updated when mappings change but otherwise left alone. If we end up doing that, we could just let the ISA specific process classes enable whatever additional TLB machinery they need, likely a backing copy in memory, without any knowledge or involvement from the ISA agnostic class. 
We would be able to get rid of the useArchPT setting and the bits of code in the configs which set it. Change-Id: I2e21945cd852bb1b3d0740fe6a4c5acbfd9548c5 Reviewed-on: https://gem5-review.googlesource.com/6983 Maintainer: Gabe Black <gabeblack@google.com> Reviewed-by: Brandon Potter <Brandon.Potter@amd.com> Reviewed-by: Jason Lowe-Power <jason@lowepower.com> Reviewed-by: Anthony Gutierrez <anthony.gutierrez@amd.com>
Diffstat (limited to 'src/mem/page_table.cc')
-rw-r--r--src/mem/page_table.cc63
1 file changed, 38 insertions, 25 deletions
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index 0abe8eaf3..4f06c29e8 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -56,6 +56,8 @@ FuncPageTable::FuncPageTable(const std::string &__name,
FuncPageTable::~FuncPageTable()
{
+ for (auto &iter : pTable)
+ delete iter.second;
}
void
@@ -68,12 +70,20 @@ FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr+ size);
for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
- if (!clobber && (pTable.find(vaddr) != pTable.end())) {
- // already mapped
- fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
+ auto it = pTable.find(vaddr);
+ if (it != pTable.end()) {
+ if (clobber) {
+ delete it->second;
+ } else {
+ // already mapped
+ fatal("FuncPageTable::allocate: addr %#x already mapped",
+ vaddr);
+ }
+ } else {
+ it = pTable.emplace(vaddr, nullptr).first;
}
- pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
+ it->second = new TheISA::TlbEntry(pid, vaddr, paddr,
flags & Uncacheable,
flags & ReadOnly);
eraseCacheEntry(vaddr);
@@ -93,13 +103,15 @@ FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
for (; size > 0;
size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
{
- assert(pTable.find(vaddr) != pTable.end());
+ auto new_it = pTable.find(new_vaddr);
+ auto old_it = pTable.find(vaddr);
+ assert(old_it != pTable.end() && new_it == pTable.end());
- pTable[new_vaddr] = pTable[vaddr];
- pTable.erase(vaddr);
+ new_it->second = old_it->second;
+ pTable.erase(old_it);
eraseCacheEntry(vaddr);
- pTable[new_vaddr].updateVaddr(new_vaddr);
- updateCache(new_vaddr, pTable[new_vaddr]);
+ new_it->second->updateVaddr(new_vaddr);
+ updateCache(new_vaddr, new_it->second);
}
}
@@ -107,7 +119,7 @@ void
FuncPageTable::getMappings(std::vector<std::pair<Addr, Addr>> *addr_maps)
{
for (auto &iter : pTable)
- addr_maps->push_back(make_pair(iter.first, iter.second.pageStart()));
+ addr_maps->push_back(make_pair(iter.first, iter.second->pageStart()));
}
void
@@ -118,9 +130,11 @@ FuncPageTable::unmap(Addr vaddr, int64_t size)
DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr+ size);
for (; size > 0; size -= pageSize, vaddr += pageSize) {
- assert(pTable.find(vaddr) != pTable.end());
- pTable.erase(vaddr);
+ auto it = pTable.find(vaddr);
+ assert(it != pTable.end());
eraseCacheEntry(vaddr);
+ delete it->second;
+ pTable.erase(it);
}
}
@@ -145,16 +159,16 @@ FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
Addr page_addr = pageAlign(vaddr);
- if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
- entry = pTableCache[0].entry;
+ if (pTableCache[0].entry && pTableCache[0].vaddr == page_addr) {
+ entry = *pTableCache[0].entry;
return true;
}
- if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
- entry = pTableCache[1].entry;
+ if (pTableCache[1].entry && pTableCache[1].vaddr == page_addr) {
+ entry = *pTableCache[1].entry;
return true;
}
- if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
- entry = pTableCache[2].entry;
+ if (pTableCache[2].entry && pTableCache[2].vaddr == page_addr) {
+ entry = *pTableCache[2].entry;
return true;
}
@@ -165,7 +179,7 @@ FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
}
updateCache(page_addr, iter->second);
- entry = iter->second;
+ entry = *iter->second;
return true;
}
@@ -209,7 +223,7 @@ FuncPageTable::serialize(CheckpointOut &cp) const
ScopedCheckpointSection sec(cp, csprintf("Entry%d", count++));
paramOut(cp, "vaddr", pte.first);
- pte.second.serialize(cp);
+ pte.second->serialize(cp);
}
assert(count == pTable.size());
}
@@ -223,14 +237,13 @@ FuncPageTable::unserialize(CheckpointIn &cp)
for (int i = 0; i < count; ++i) {
ScopedCheckpointSection sec(cp, csprintf("Entry%d", i));
- std::unique_ptr<TheISA::TlbEntry> entry;
- Addr vaddr;
+ TheISA::TlbEntry *entry = new TheISA::TlbEntry();
+ entry->unserialize(cp);
+ Addr vaddr;
paramIn(cp, "vaddr", vaddr);
- entry.reset(new TheISA::TlbEntry());
- entry->unserialize(cp);
- pTable[vaddr] = *entry;
+ pTable[vaddr] = entry;
}
}