summaryrefslogtreecommitdiff
path: root/src/mem/page_table.cc
diff options
context:
space:
mode:
authorMitch Hayenga <mitch.hayenga+gem5@gmail.com>2013-04-23 09:47:52 -0400
committerMitch Hayenga <mitch.hayenga+gem5@gmail.com>2013-04-23 09:47:52 -0400
commitb222ba2fd32c5ac9c91cfab518382761cac5b2c7 (patch)
tree09e3dddac2413aa7cf6ea4b58faab9a7bd4708d4 /src/mem/page_table.cc
parent3e35fa5dcc8af49f652a20595ce2470b5a116ddc (diff)
downloadgem5-b222ba2fd32c5ac9c91cfab518382761cac5b2c7.tar.xz
sim: Fix two bugs relating to software caching of PageTable entries.
The existing implementation can read uninitialized data or stale information from the cached PageTable entries. 1) Add a valid bit for the cache entries. Simply using zero for the virtual address to signify invalid entries is not sufficient. Speculative, wrong-path accesses frequently access page zero. The current implementation would return an uninitialized TLB entry when address zero was accessed and the PageTable cache entry was invalid. 2) When unmapping/mapping/remapping a page, invalidate the corresponding PageTable cache entry if one already exists.
Diffstat (limited to 'src/mem/page_table.cc')
-rw-r--r--src/mem/page_table.cc16
1 file changed, 9 insertions, 7 deletions
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index be862e429..cb7ddfe4b 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -55,9 +55,9 @@ PageTable::PageTable(const std::string &__name, uint64_t _pid, Addr _pageSize)
pid(_pid), _name(__name)
{
assert(isPowerOf2(pageSize));
- pTableCache[0].vaddr = 0;
- pTableCache[1].vaddr = 0;
- pTableCache[2].vaddr = 0;
+ pTableCache[0].valid = false;
+ pTableCache[1].valid = false;
+ pTableCache[2].valid = false;
}
PageTable::~PageTable()
@@ -79,6 +79,7 @@ PageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
}
pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr);
+ eraseCacheEntry(vaddr);
updateCache(vaddr, pTable[vaddr]);
}
}
@@ -97,6 +98,7 @@ PageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
pTable[new_vaddr] = pTable[vaddr];
pTable.erase(vaddr);
+ eraseCacheEntry(vaddr);
pTable[new_vaddr].updateVaddr(new_vaddr);
updateCache(new_vaddr, pTable[new_vaddr]);
}
@@ -111,8 +113,8 @@ PageTable::unmap(Addr vaddr, int64_t size)
for (; size > 0; size -= pageSize, vaddr += pageSize) {
assert(pTable.find(vaddr) != pTable.end());
-
pTable.erase(vaddr);
+ eraseCacheEntry(vaddr);
}
}
@@ -137,15 +139,15 @@ PageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
Addr page_addr = pageAlign(vaddr);
- if (pTableCache[0].vaddr == page_addr) {
+ if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
entry = pTableCache[0].entry;
return true;
}
- if (pTableCache[1].vaddr == page_addr) {
+ if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
entry = pTableCache[1].entry;
return true;
}
- if (pTableCache[2].vaddr == page_addr) {
+ if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
entry = pTableCache[2].entry;
return true;
}