From e1ac9629398027186ef4c2a66772aeff2b4c6792 Mon Sep 17 00:00:00 2001
From: Andreas Hansson <andreas.hansson@arm.com>
Date: Wed, 3 Sep 2014 07:42:21 -0400
Subject: arch: Cleanup unused ISA traits constants

This patch prunes unused values, and also unifies how the values are
defined (not using an enum for ALPHA), aligning the use of int vs Addr
etc.

The patch also removes the duplication of PageBytes/PageShift and
VMPageSize/LogVMPageSize. For all ISAs the two pairs had identical
values and the latter has been removed.
---
 src/mem/cache/prefetch/base.cc         |  2 +-
 src/mem/multi_level_page_table_impl.hh | 24 ++++++++++++------------
 src/mem/page_table.hh                  |  6 +++---
 src/mem/ruby/common/Address.cc         |  2 +-
 src/mem/se_translating_port_proxy.cc   | 14 +++++++-------
 5 files changed, 24 insertions(+), 24 deletions(-)

(limited to 'src/mem')

diff --git a/src/mem/cache/prefetch/base.cc b/src/mem/cache/prefetch/base.cc
index d5cddc88e..57c1424bf 100644
--- a/src/mem/cache/prefetch/base.cc
+++ b/src/mem/cache/prefetch/base.cc
@@ -312,7 +312,7 @@ BasePrefetcher::inPrefetch(Addr address, bool is_secure)
 bool
 BasePrefetcher::samePage(Addr a, Addr b)
 {
-    return roundDown(a, TheISA::VMPageSize) == roundDown(b, TheISA::VMPageSize);
+    return roundDown(a, TheISA::PageBytes) == roundDown(b, TheISA::PageBytes);
 }


diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh
index d944ff04b..9ffcf02e8 100644
--- a/src/mem/multi_level_page_table_impl.hh
+++ b/src/mem/multi_level_page_table_impl.hh
@@ -74,13 +74,13 @@ MultiLevelPageTable<ISAOps>::initState(ThreadContext* tc)
     /* setting first level of the page table */
     uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                             logLevelSize[numLevels-1];
-    assert(log_req_size >= LogVMPageSize);
-    uint64_t npages = 1 << (log_req_size - LogVMPageSize);
+    assert(log_req_size >= PageShift);
+    uint64_t npages = 1 << (log_req_size - PageShift);

     Addr paddr = system->allocPhysPages(npages);

     PortProxy &p = system->physProxy;
-    p.memsetBlob(paddr, 0, npages << LogVMPageSize);
+    p.memsetBlob(paddr, 0, npages << PageShift);
 }


@@ -93,7 +93,7 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
     Addr level_base = basePtr;
     for (int i = numLevels - 1; i > 0; i--) {

-        Addr entry_addr = (level_base<<LogVMPageSize) +
+        Addr entry_addr = (level_base<<PageShift) +
                           offsets[i] * sizeof(PageTableEntry);

         PortProxy &p = system->physProxy;
@@ -106,16 +106,16 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)

             uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                                     logLevelSize[i-1];
-            assert(log_req_size >= LogVMPageSize);
-            uint64_t npages = 1 << (log_req_size - LogVMPageSize);
+            assert(log_req_size >= PageShift);
+            uint64_t npages = 1 << (log_req_size - PageShift);

             DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
                     npages, i-1);
             /* allocate new entry */
             Addr next_entry_paddr = system->allocPhysPages(npages);
-            p.memsetBlob(next_entry_paddr, 0, npages << LogVMPageSize);
+            p.memsetBlob(next_entry_paddr, 0, npages << PageShift);

-            next_entry_pnum = next_entry_paddr >> LogVMPageSize;
+            next_entry_pnum = next_entry_paddr >> PageShift;
             pTableISAOps.setPnum(entry, next_entry_pnum);
             pTableISAOps.setPTEFields(entry);
             p.write<PageTableEntry>(entry_addr, entry);
@@ -125,7 +125,7 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
         level_base = next_entry_pnum;

     }
-    PTE_addr = (level_base<<LogVMPageSize) +
+    PTE_addr = (level_base<<PageShift) +
                offsets[0] * sizeof(PageTableEntry);
     DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
     return true;
@@ -152,7 +152,7 @@ MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr, int64_t size, bool clob
         PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
         Addr entry_paddr = pTableISAOps.getPnum(PTE);
         if (!clobber && entry_paddr == 0) {
-            pTableISAOps.setPnum(PTE, paddr >> LogVMPageSize);
+            pTableISAOps.setPnum(PTE, paddr >> PageShift);
             pTableISAOps.setPTEFields(PTE);
             p.write<PageTableEntry>(PTE_addr, PTE);
             DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
@@ -193,7 +193,7 @@ MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)

             walk(new_vaddr, true, new_PTE_addr);
             PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);
-            pTableISAOps.setPnum(new_PTE, paddr>>LogVMPageSize);
+            pTableISAOps.setPnum(new_PTE, paddr>>PageShift);
             pTableISAOps.setPTEFields(new_PTE);
             p.write<PageTableEntry>(new_PTE_addr, new_PTE);
             DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
@@ -285,7 +285,7 @@ MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
         if (pnum == 0)
             return false;

-        entry = TlbEntry(pid, vaddr, pnum << LogVMPageSize);
+        entry = TlbEntry(pid, vaddr, pnum << PageShift);
         updateCache(page_addr, entry);
     } else {
         return false;
diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh
index 010259a72..b906e5b82 100644
--- a/src/mem/page_table.hh
+++ b/src/mem/page_table.hh
@@ -73,7 +73,7 @@ class PageTableBase
   public:

     PageTableBase(const std::string &__name, uint64_t _pid,
-                  Addr _pageSize = TheISA::VMPageSize)
+                  Addr _pageSize = TheISA::PageBytes)
             : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
               pid(_pid), _name(__name)
     {
@@ -188,7 +188,7 @@ class FuncPageTable : public PageTableBase
   public:

     FuncPageTable(const std::string &__name, uint64_t _pid,
-                  Addr _pageSize = TheISA::VMPageSize);
+                  Addr _pageSize = TheISA::PageBytes);

     ~FuncPageTable();

@@ -229,7 +229,7 @@ class NoArchPageTable : public FuncPageTable
 {
   public:
     NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
-                    Addr _pageSize = TheISA::VMPageSize) : FuncPageTable(__name, _pid)
+                    Addr _pageSize = TheISA::PageBytes) : FuncPageTable(__name, _pid)
     {
         fatal("No architectural page table defined for this ISA.\n");
     }
diff --git a/src/mem/ruby/common/Address.cc b/src/mem/ruby/common/Address.cc
index 692f4cae8..eb234f46e 100644
--- a/src/mem/ruby/common/Address.cc
+++ b/src/mem/ruby/common/Address.cc
@@ -139,7 +139,7 @@ Address::operator=(const Address& obj)
 void
 Address::makePageAddress()
 {
-    m_address = maskLowOrderBits(TheISA::LogVMPageSize);
+    m_address = maskLowOrderBits(TheISA::PageShift);
 }

 Address
diff --git a/src/mem/se_translating_port_proxy.cc b/src/mem/se_translating_port_proxy.cc
index 1060a3270..ef9ce5850 100644
--- a/src/mem/se_translating_port_proxy.cc
+++ b/src/mem/se_translating_port_proxy.cc
@@ -68,7 +68,7 @@ SETranslatingPortProxy::tryReadBlob(Addr addr, uint8_t *p, int size) const
 {
     int prevSize = 0;

-    for (ChunkGenerator gen(addr, size, VMPageSize); !gen.done(); gen.next()) {
+    for (ChunkGenerator gen(addr, size, PageBytes); !gen.done(); gen.next()) {
         Addr paddr;

         if (!pTable->translate(gen.addr(),paddr))
@@ -94,13 +94,13 @@ SETranslatingPortProxy::tryWriteBlob(Addr addr, uint8_t *p, int size) const
 {
     int prevSize = 0;

-    for (ChunkGenerator gen(addr, size, VMPageSize); !gen.done(); gen.next()) {
+    for (ChunkGenerator gen(addr, size, PageBytes); !gen.done(); gen.next()) {
         Addr paddr;

         if (!pTable->translate(gen.addr(), paddr)) {
             if (allocating == Always) {
-                process->allocateMem(roundDown(gen.addr(), VMPageSize),
-                                     VMPageSize);
+                process->allocateMem(roundDown(gen.addr(), PageBytes),
+                                     PageBytes);
             } else if (allocating == NextPage) {
                 // check if we've accessed the next page on the stack
                 if (!process->fixupStackFault(gen.addr()))
@@ -130,13 +130,13 @@ SETranslatingPortProxy::writeBlob(Addr addr, uint8_t *p, int size) const
 bool
 SETranslatingPortProxy::tryMemsetBlob(Addr addr, uint8_t val, int size) const
 {
-    for (ChunkGenerator gen(addr, size, VMPageSize); !gen.done(); gen.next()) {
+    for (ChunkGenerator gen(addr, size, PageBytes); !gen.done(); gen.next()) {
         Addr paddr;

         if (!pTable->translate(gen.addr(), paddr)) {
             if (allocating == Always) {
-                process->allocateMem(roundDown(gen.addr(), VMPageSize),
-                                     VMPageSize);
+                process->allocateMem(roundDown(gen.addr(), PageBytes),
+                                     PageBytes);
                 pTable->translate(gen.addr(), paddr);
             } else {
                 return false;
-- 
cgit v1.2.3