| author | Alexandru Dutu <alexandru.dutu@amd.com> | 2014-11-23 18:01:09 -0800 |
| --- | --- | --- |
| committer | Alexandru Dutu <alexandru.dutu@amd.com> | 2014-11-23 18:01:09 -0800 |
| commit | e4859fae5b5edc6df51f42e2b1859d6c8f7c15f6 (patch) | |
| tree | 1b2c394f449cab364dbff1fcc19746a876195acd /src | |
| parent | a19cf6943b58d9d76dd7256705394e002500a464 (diff) | |
| download | gem5-e4859fae5b5edc6df51f42e2b1859d6c8f7c15f6.tar.xz | |
mem: Page Table long lines
Trimmed down all lines longer than 78 characters.
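Since the change is purely a line-length cleanup, it can be sanity-checked by rescanning the touched files for overlong lines. Below is a minimal, hypothetical checker (it is not gem5's style checker, and the file name is made up for illustration) that reports every line longer than 78 characters, the limit stated above.

```cpp
// check_line_length.cc -- hypothetical helper, not part of gem5.
// Reports every line longer than 78 characters in the given files.
#include <fstream>
#include <iostream>
#include <string>

int main(int argc, char **argv)
{
    const std::string::size_type limit = 78;

    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <file>...\n";
        return 1;
    }

    int status = 0;
    for (int i = 1; i < argc; ++i) {
        std::ifstream in(argv[i]);
        std::string line;
        for (unsigned lineno = 1; std::getline(in, line); ++lineno) {
            if (line.size() > limit) {
                std::cout << argv[i] << ":" << lineno << ": "
                          << line.size() << " characters\n";
                status = 1;
            }
        }
    }
    return status;
}
```

Run over the four files in the diffstat below, a checker like this would be expected to flag the long constructor and map() declarations before this commit and nothing after it.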
Diffstat (limited to 'src')
| -rw-r--r-- | src/mem/multi_level_page_table.hh | 3 |
| -rw-r--r-- | src/mem/multi_level_page_table_impl.hh | 21 |
| -rw-r--r-- | src/mem/page_table.cc | 9 |
| -rw-r--r-- | src/mem/page_table.hh | 3 |
4 files changed, 24 insertions, 12 deletions
diff --git a/src/mem/multi_level_page_table.hh b/src/mem/multi_level_page_table.hh
index 9ae86924d..8d9febac8 100644
--- a/src/mem/multi_level_page_table.hh
+++ b/src/mem/multi_level_page_table.hh
@@ -141,7 +141,8 @@ class MultiLevelPageTable : public PageTableBase
     bool walk(Addr vaddr, bool allocate, Addr &PTE_addr);
   public:
-    MultiLevelPageTable(const std::string &__name, uint64_t _pid, System *_sys);
+    MultiLevelPageTable(const std::string &__name, uint64_t _pid,
+                        System *_sys);
     ~MultiLevelPageTable();
     void initState(ThreadContext* tc);
diff --git a/src/mem/multi_level_page_table_impl.hh b/src/mem/multi_level_page_table_impl.hh
index 9ffcf02e8..063e097c5 100644
--- a/src/mem/multi_level_page_table_impl.hh
+++ b/src/mem/multi_level_page_table_impl.hh
@@ -49,7 +49,8 @@ using namespace std;
 using namespace TheISA;
 template <class ISAOps>
-MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name, uint64_t _pid, System *_sys)
+MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name,
+                                                 uint64_t _pid, System *_sys)
     : PageTableBase(__name, _pid), system(_sys),
       logLevelSize(PageTableLayout),
       numLevels(logLevelSize.size())
@@ -109,7 +110,8 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
             assert(log_req_size >= PageShift);
             uint64_t npages = 1 << (log_req_size - PageShift);
-            DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n", npages, i-1);
+            DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
+                    npages, i - 1);
             /* allocate new entry */
             Addr next_entry_paddr = system->allocPhysPages(npages);
@@ -121,7 +123,8 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
             p.write<PageTableEntry>(entry_addr, entry);
         }
-        DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n", i, level_base, offsets[i], next_entry_pnum);
+        DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
+                i, level_base, offsets[i], next_entry_pnum);
         level_base = next_entry_pnum;
     }
@@ -133,7 +136,8 @@ MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
 template <class ISAOps>
 void
-MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
+MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
+                                 int64_t size, bool clobber)
 {
     // starting address must be page aligned
     assert(pageOffset(vaddr) == 0);
@@ -153,7 +157,7 @@ MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr, int64_t size, bool clob
                 p.write<PageTableEntry>(PTE_addr, PTE);
                 DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);
             } else {
-                fatal("address 0x%x already mapped to %x", vaddr, entry_paddr);
+                fatal("addr 0x%x already mapped to %x", vaddr, entry_paddr);
             }
             eraseCacheEntry(vaddr);
@@ -175,7 +179,9 @@ MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
     PortProxy &p = system->physProxy;
-    for (; size > 0; size -= pageSize, vaddr += pageSize, new_vaddr += pageSize) {
+    for (; size > 0;
+         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
+    {
         Addr PTE_addr;
         if (walk(vaddr, false, PTE_addr)) {
             PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
@@ -306,7 +312,8 @@ MultiLevelPageTable<ISAOps>::serialize(std::ostream &os)
 template <class ISAOps>
 void
-MultiLevelPageTable<ISAOps>::unserialize(Checkpoint *cp, const std::string &section)
+MultiLevelPageTable<ISAOps>::unserialize(Checkpoint *cp,
+                                         const std::string &section)
 {
     paramIn(cp, section, "ptable.pointer", basePtr);
 }
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index 8770abf98..fdea1fbad 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -51,7 +51,8 @@ using namespace std;
 using namespace TheISA;
-FuncPageTable::FuncPageTable(const std::string &__name, uint64_t _pid, Addr _pageSize)
+FuncPageTable::FuncPageTable(const std::string &__name,
+                             uint64_t _pid, Addr _pageSize)
     : PageTableBase(__name, _pid, _pageSize)
 {
 }
@@ -71,7 +72,7 @@ FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
     for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
         if (!clobber && (pTable.find(vaddr) != pTable.end())) {
             // already mapped
-            fatal("FuncPageTable::allocate: address 0x%x already mapped", vaddr);
+            fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
         }
         pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr);
@@ -89,7 +90,9 @@ FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
     DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n",
             vaddr, new_vaddr, size);
-    for (; size > 0; size -= pageSize, vaddr += pageSize, new_vaddr += pageSize) {
+    for (; size > 0;
+         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
+    {
         assert(pTable.find(vaddr) != pTable.end());
         pTable[new_vaddr] = pTable[vaddr];
diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh
index b906e5b82..be0983996 100644
--- a/src/mem/page_table.hh
+++ b/src/mem/page_table.hh
@@ -93,7 +93,8 @@ class PageTableBase
     Addr pageAlign(Addr a) { return (a & ~offsetMask); }
     Addr pageOffset(Addr a) { return (a & offsetMask); }
-    virtual void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false) = 0;
+    virtual void map(Addr vaddr, Addr paddr, int64_t size,
+                     bool clobber = false) = 0;
     virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
     virtual void unmap(Addr vaddr, int64_t size) = 0;