Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/multi_level_page_table.hh       |  3
-rw-r--r--  src/mem/multi_level_page_table_impl.hh  | 24
-rw-r--r--  src/mem/page_table.cc                   |  7
-rw-r--r--  src/mem/page_table.hh                   | 27
4 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/src/mem/multi_level_page_table.hh b/src/mem/multi_level_page_table.hh
index 8d9febac8..232121c21 100644
--- a/src/mem/multi_level_page_table.hh
+++ b/src/mem/multi_level_page_table.hh
@@ -147,7 +147,8 @@ public:
void initState(ThreadContext* tc);
- void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false);
+ void map(Addr vaddr, Addr paddr, int64_t size,
+ uint64_t flags = 0);
void remap(Addr vaddr, int64_t size, Addr new_vaddr);
void unmap(Addr vaddr, int64_t size);
bool isUnmapped(Addr vaddr, int64_t size);
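With this change, call sites no longer pass a bare bool to request clobbering; they OR together values from the MappingFlags enum added to PageTableBase in page_table.hh below. A minimal call-site sketch (pt, vaddr, paddr, and size are hypothetical):

    // Old spelling: map(vaddr, paddr, size, true) to overwrite a mapping.
    // New spelling: request the same behavior with an explicit flag.
    pt->map(vaddr, paddr, size, PageTableBase::Clobber);

    // Flags combine with bitwise OR, e.g. an uncacheable, read-only page:
    pt->map(vaddr, paddr, size,
            PageTableBase::Uncacheable | PageTableBase::ReadOnly);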
diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh
index 3d8cbe75d..6714a170e 100644
--- a/src/mem/multi_level_page_table_impl.hh
+++ b/src/mem/multi_level_page_table_impl.hh
@@ -137,8 +137,9 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
- int64_t size, bool clobber)
+ int64_t size, uint64_t flags)
{
+ bool clobber = flags & Clobber;
// starting address must be page aligned
assert(pageOffset(vaddr) == 0);
@@ -155,12 +156,21 @@ MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
fatal("addr 0x%x already mapped to %x", vaddr, entry_paddr);
}
pTableISAOps.setPnum(PTE, paddr >> PageShift);
- pTableISAOps.setPTEFields(PTE);
+ uint64_t PTE_flags = 0;
+ if (flags & NotPresent)
+ PTE_flags |= TheISA::PTE_NotPresent;
+ if (flags & Uncacheable)
+ PTE_flags |= TheISA::PTE_Uncacheable;
+ if (flags & ReadOnly)
+ PTE_flags |= TheISA::PTE_ReadOnly;
+ pTableISAOps.setPTEFields(PTE, PTE_flags);
p.write<PageTableEntry>(PTE_addr, PTE);
DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
eraseCacheEntry(vaddr);
- updateCache(vaddr, TlbEntry(pid, vaddr, paddr));
+ updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
+ flags & Uncacheable,
+ flags & ReadOnly));
}
}
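The generic flags are translated above into ISA-specific PTE bits before setPTEFields() writes them into the entry. A sketch of the ISA-side half of that contract, assuming a PTE format with present, uncacheable, and read-only bits (the field names are illustrative, not the actual TheISA definitions):

    // Consumes the already-translated TheISA::PTE_* mask produced in
    // map() above and applies it to the hardware-defined entry layout.
    void
    PageTableOps::setPTEFields(PageTableEntry &PTE, uint64_t flags)
    {
        PTE.present     = !(flags & PTE_NotPresent);  // valid bit
        PTE.uncacheable =   flags & PTE_Uncacheable;  // bypass caches
        PTE.readOnly    =   flags & PTE_ReadOnly;     // write-protect
    }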
@@ -205,7 +215,9 @@ MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
}
eraseCacheEntry(vaddr);
- updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr));
+ updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE)));
} else {
fatal("Page fault while remapping");
}
@@ -290,7 +302,9 @@ MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
if (pnum == 0)
return false;
- entry = TlbEntry(pid, vaddr, pnum << PageShift);
+ entry = TlbEntry(pid, vaddr, pnum << PageShift,
+ pTableISAOps.isUncacheable(PTE),
+ pTableISAOps.isReadOnly(PTE));
updateCache(page_addr, entry);
} else {
return false;
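Both remap() and lookup() now recover the attribute bits from the stored PTE via isUncacheable()/isReadOnly() and forward them to the TlbEntry constructor. The constructor shape these call sites assume (the real per-ISA definition lives in the ISA's pagetable.hh and may differ):

    // Assumed extended constructor: attribute bools ride along with the
    // translation so the TLB can enforce them on access.
    TlbEntry(Addr asn, Addr vaddr, Addr paddr,
             bool uncacheable, bool read_only);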
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index fdea1fbad..d3af09e1e 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -62,8 +62,9 @@ FuncPageTable::~FuncPageTable()
}
void
-FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
+FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
{
+ bool clobber = flags & Clobber;
// starting address must be page aligned
assert(pageOffset(vaddr) == 0);
@@ -75,7 +76,9 @@ FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
}
- pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr);
+ pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
+ flags & Uncacheable,
+ flags & ReadOnly);
eraseCacheEntry(vaddr);
updateCache(vaddr, pTable[vaddr]);
}
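Note that the bitwise tests lean on C++'s integral-to-bool conversion: any nonzero result of flags & Uncacheable becomes true in the TlbEntry constructor. An equivalent, more explicit spelling of the same assignment:

    // Same behavior as the + lines above, with the conversions spelled out.
    bool uncacheable = (flags & Uncacheable) != 0;
    bool read_only   = (flags & ReadOnly)   != 0;
    pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
                                     uncacheable, read_only);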
diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh
index be0983996..22b5c61eb 100644
--- a/src/mem/page_table.hh
+++ b/src/mem/page_table.hh
@@ -85,6 +85,20 @@ class PageTableBase
virtual ~PageTableBase() {};
+ /* generic page table mapping flags
+  *              unset | set
+  * bit 0 - no-clobber | clobber
+  * bit 1 -    present | not-present
+  * bit 2 -  cacheable | uncacheable
+  * bit 3 - read-write | read-only
+  */
+ enum MappingFlags : uint32_t {
+ Clobber = 1,
+ NotPresent = 2,
+ Uncacheable = 4,
+ ReadOnly = 8,
+ };
+
virtual void initState(ThreadContext* tc) = 0;
// for DPRINTF compatibility
@@ -93,8 +107,16 @@ class PageTableBase
Addr pageAlign(Addr a) { return (a & ~offsetMask); }
Addr pageOffset(Addr a) { return (a & offsetMask); }
+ /**
+ * Maps a virtual memory region to a physical memory region.
+ * @param vaddr The starting virtual address of the region.
+ * @param paddr The starting physical address where the region is mapped.
+ * @param size The length of the region.
+ * @param flags Generic mapping flags that can be set by OR-ing values
+ * from the MappingFlags enum.
+ */
virtual void map(Addr vaddr, Addr paddr, int64_t size,
- bool clobber = false) = 0;
+ uint64_t flags = 0) = 0;
virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
virtual void unmap(Addr vaddr, int64_t size) = 0;
@@ -197,7 +219,8 @@ class FuncPageTable : public PageTableBase
{
}
- void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false);
+ void map(Addr vaddr, Addr paddr, int64_t size,
+ uint64_t flags = 0);
void remap(Addr vaddr, int64_t size, Addr new_vaddr);
void unmap(Addr vaddr, int64_t size);
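Taken together, a minimal end-to-end sketch of the new API (hypothetical SE-mode fixture; constructor arguments assume the FuncPageTable signature of name, pid, and page size):

    // Map one read-only page, look it up, then tear it down.
    FuncPageTable pt("test-pt", /* pid = */ 100, TheISA::PageBytes);

    Addr va = 0x1000, pa = 0x200000;
    pt.map(va, pa, TheISA::PageBytes, PageTableBase::ReadOnly);

    TheISA::TlbEntry entry;
    if (pt.lookup(va, entry)) {
        // entry carries the read-only attribute into the TLB
    }

    pt.unmap(va, TheISA::PageBytes);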