author     Alexandru Dutu <alexandru.dutu@amd.com>    2014-11-23 18:01:09 -0800
committer  Alexandru Dutu <alexandru.dutu@amd.com>    2014-11-23 18:01:09 -0800
commit     1f539f13c32ad5a9187d56a098d4c857639b0e05 (patch)
tree       7618c3b946d9c25d9b22018f226eee77b6de4aaf /src/mem/multi_level_page_table_impl.hh
parent     c11bcb8119273ef91c40a25b8fd9471a887d0ee5 (diff)
download   gem5-1f539f13c32ad5a9187d56a098d4c857639b0e05.tar.xz
mem: Page Table map api modification
This patch adds uncacheable/cacheable and read-only/read-write attributes to the map method of PageTableBase. It also modifies the constructor of TlbEntry structs for all architectures to consider the new attributes.
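
A caller-side sketch of the new signature may help. Apart from the four flag names taken from the diff below (Clobber, NotPresent, Uncacheable, ReadOnly), everything here is an illustrative stand-in, not gem5's actual declarations:

    // Caller-side sketch of the revised map() interface. The enum scope, the
    // exact bit values, and the DemoPageTable stub are assumptions made so the
    // example compiles on its own.
    #include <cstdint>
    #include <cstdio>

    using Addr = uint64_t;

    enum MappingFlags : uint64_t {
        Clobber     = 1 << 0,  // replace an existing mapping instead of fataling
        NotPresent  = 1 << 1,  // allocate the entry but mark it not present
        Uncacheable = 1 << 2,  // accesses to the page bypass the caches
        ReadOnly    = 1 << 3   // writes to the page should fault
    };

    // Stub standing in for MultiLevelPageTable<ISAOps> so the sketch compiles.
    struct DemoPageTable {
        void map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
        {
            std::printf("map %#llx -> %#llx, size %lld, flags %#llx\n",
                        (unsigned long long)vaddr, (unsigned long long)paddr,
                        (long long)size, (unsigned long long)flags);
        }
    };

    int main()
    {
        DemoPageTable pt;
        // Before the patch the last argument was a lone bool clobber; now the
        // cacheability and writability attributes travel in the same flags word.
        pt.map(0x1000, 0x2000, 0x1000, Clobber | Uncacheable | ReadOnly);
        return 0;
    }
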
Diffstat (limited to 'src/mem/multi_level_page_table_impl.hh')
-rw-r--r--  src/mem/multi_level_page_table_impl.hh  24
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh
index 3d8cbe75d..6714a170e 100644
--- a/src/mem/multi_level_page_table_impl.hh
+++ b/src/mem/multi_level_page_table_impl.hh
@@ -137,8 +137,9 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
- int64_t size, bool clobber)
+ int64_t size, uint64_t flags)
{
+ bool clobber = flags & Clobber;
// starting address must be page aligned
assert(pageOffset(vaddr) == 0);
@@ -155,12 +156,21 @@ MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
fatal("addr 0x%x already mapped to %x", vaddr, entry_paddr);
}
pTableISAOps.setPnum(PTE, paddr >> PageShift);
- pTableISAOps.setPTEFields(PTE);
+ uint64_t PTE_flags = 0;
+ if (flags & NotPresent)
+ PTE_flags |= TheISA::PTE_NotPresent;
+ if (flags & Uncacheable)
+ PTE_flags |= TheISA::PTE_Uncacheable;
+ if (flags & ReadOnly)
+ PTE_flags |= TheISA::PTE_ReadOnly;
+ pTableISAOps.setPTEFields(PTE, PTE_flags);
p.write<PageTableEntry>(PTE_addr, PTE);
DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
eraseCacheEntry(vaddr);
- updateCache(vaddr, TlbEntry(pid, vaddr, paddr));
+ updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
+ flags & Uncacheable,
+ flags & ReadOnly));
}
}
@@ -205,7 +215,9 @@ MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
}
eraseCacheEntry(vaddr);
- updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr));
+ updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE)));
} else {
fatal("Page fault while remapping");
}
@@ -290,7 +302,9 @@ MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
if (pnum == 0)
return false;
- entry = TlbEntry(pid, vaddr, pnum << PageShift);
+ entry = TlbEntry(pid, vaddr, pnum << PageShift,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE));
updateCache(page_addr, entry);
} else {
return false;
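
For reference, a minimal sketch of the per-ISA hooks this template code leans on after the patch. The shapes are inferred only from the calls in the diff above (setPTEFields() taking a flags word, the isUncacheable()/isReadOnly() predicates, and a TlbEntry constructor carrying the two attribute bits); the Demo* names and the bit layout are assumptions, and each real architecture defines its own encoding.

    // Sketch of the ISA-side interface implied by the diff: setPTEFields() now
    // accepts a flags word, and isUncacheable()/isReadOnly() read the attribute
    // bits back out so lookup()/remap() can rebuild a TlbEntry. Bit positions
    // and the Demo* names are illustrative assumptions.
    #include <cstdint>

    using Addr = uint64_t;
    using PageTableEntry = uint64_t;

    namespace TheISA {
        // Stand-ins for the per-architecture PTE attribute bits named above.
        const uint64_t PTE_NotPresent  = 1ULL << 0;
        const uint64_t PTE_Uncacheable = 1ULL << 1;
        const uint64_t PTE_ReadOnly    = 1ULL << 2;
    }

    struct DemoISAOps {
        // Fold the translated attribute bits into the entry.
        void setPTEFields(PageTableEntry &pte, uint64_t flags = 0) const
        { pte |= flags; }

        bool isUncacheable(PageTableEntry pte) const
        { return pte & TheISA::PTE_Uncacheable; }

        bool isReadOnly(PageTableEntry pte) const
        { return pte & TheISA::PTE_ReadOnly; }
    };

    // TlbEntry constructor shape implied by the updated updateCache() calls.
    struct DemoTlbEntry {
        uint64_t pid;
        Addr vaddr, paddr;
        bool uncacheable, readOnly;

        DemoTlbEntry(uint64_t pid, Addr vaddr, Addr paddr,
                     bool uncacheable, bool readOnly)
            : pid(pid), vaddr(vaddr), paddr(paddr),
              uncacheable(uncacheable), readOnly(readOnly) {}
    };
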