author     | Gabe Black <gblack@eecs.umich.edu> | 2007-11-12 14:38:31 -0800
committer  | Gabe Black <gblack@eecs.umich.edu> | 2007-11-12 14:38:31 -0800
commit     | fce45baf178b43c2ea1476967fba3766e9b2ea9d (patch)
tree       | 1aa3ba357950f9a18e2d7a7e6fd4be8c8d0e5d91
parent     | f17f3d20be08d25f176138691a29897df54e5cc0 (diff)
download   | gem5-fce45baf178b43c2ea1476967fba3766e9b2ea9d.tar.xz
X86: Work on the page table walker, TLB, and related faults.
--HG--
extra : convert_revision : 9edde958b7e571c07072785f18f9109f73b8059f
-rw-r--r-- | src/arch/x86/X86TLB.py    |   1
-rw-r--r-- | src/arch/x86/faults.cc    |  15
-rw-r--r-- | src/arch/x86/faults.hh    |  16
-rw-r--r-- | src/arch/x86/pagetable.hh |  18
-rw-r--r-- | src/arch/x86/tlb.cc       | 431
-rw-r--r-- | src/arch/x86/tlb.hh       | 114
-rw-r--r-- | src/cpu/BaseCPU.py        |   4
-rw-r--r-- | src/cpu/simple/base.cc    |   2
8 files changed, 473 insertions, 128 deletions
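One of the changes in the diff below replaces the stub VAddr struct in pagetable.hh with a BitUnion64 that names the page-table index fields for each paging mode (long, PAE, and legacy). As a rough standalone illustration of what those Bitfield ranges extract — this sketch is not gem5 code, and bitsOf plus the sample address are invented for the example — the long-mode fields decompose a virtual address like this:

```cpp
// Standalone illustration of the long-mode index fields the patch exposes
// as longl4..longl1 (Bitfield<47,39> .. Bitfield<20,12>). Hypothetical
// helper names; field ranges follow the patch.
#include <cstdint>
#include <cstdio>

static inline uint64_t bitsOf(uint64_t val, int hi, int lo)
{
    // Extract bits hi..lo inclusive, mirroring Bitfield<hi, lo>.
    return (val >> lo) & ((1ULL << (hi - lo + 1)) - 1);
}

int main()
{
    uint64_t vaddr = 0x00007f12345678ABULL;
    printf("l4 index:    %llu\n", (unsigned long long)bitsOf(vaddr, 47, 39));
    printf("l3 index:    %llu\n", (unsigned long long)bitsOf(vaddr, 38, 30));
    printf("l2 index:    %llu\n", (unsigned long long)bitsOf(vaddr, 29, 21));
    printf("l1 index:    %llu\n", (unsigned long long)bitsOf(vaddr, 20, 12));
    printf("page offset: 0x%llx\n", (unsigned long long)(vaddr & 0xfff));
    return 0;
}
```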
diff --git a/src/arch/x86/X86TLB.py b/src/arch/x86/X86TLB.py index 2d562ba9a..5c174be59 100644 --- a/src/arch/x86/X86TLB.py +++ b/src/arch/x86/X86TLB.py @@ -55,6 +55,7 @@ from MemObject import MemObject from m5.params import * +from m5.proxy import * class X86TLB(MemObject): type = 'X86TLB' diff --git a/src/arch/x86/faults.cc b/src/arch/x86/faults.cc index 13341f1de..abb5d98d7 100644 --- a/src/arch/x86/faults.cc +++ b/src/arch/x86/faults.cc @@ -93,6 +93,8 @@ #include "arch/x86/isa_traits.hh" #include "mem/page_table.hh" #include "sim/process.hh" +#else +#include "arch/x86/tlb.hh" #endif namespace X86ISA @@ -112,6 +114,19 @@ namespace X86ISA { panic("X86 faults are not implemented!"); } + + void FakeITLBFault::invoke(ThreadContext * tc) + { + // Start the page table walker. + tc->getITBPtr()->walker.start(tc, vaddr); + } + + void FakeDTLBFault::invoke(ThreadContext * tc) + { + // Start the page table walker. + tc->getDTBPtr()->walker.start(tc, vaddr); + } + #else // !FULL_SYSTEM void FakeITLBFault::invoke(ThreadContext * tc) { diff --git a/src/arch/x86/faults.hh b/src/arch/x86/faults.hh index 5a573754a..78a55d0e1 100644 --- a/src/arch/x86/faults.hh +++ b/src/arch/x86/faults.hh @@ -369,44 +369,28 @@ namespace X86ISA // the tlb on a miss and are to take the place of a hardware table walker. class FakeITLBFault : public X86Fault { -#if !FULL_SYSTEM protected: Addr vaddr; public: FakeITLBFault(Addr _vaddr) : X86Fault("fake instruction tlb fault", "itlb"), vaddr(_vaddr) -#else - public: - FakeITLBFault() : - X86Fault("fake instruction tlb fault", "itlb") -#endif {} -#if !FULL_SYSTEM void invoke(ThreadContext * tc); -#endif }; class FakeDTLBFault : public X86Fault { -#if !FULL_SYSTEM protected: Addr vaddr; public: FakeDTLBFault(Addr _vaddr) : X86Fault("fake data tlb fault", "dtlb"), vaddr(_vaddr) -#else - public: - FakeDTLBFault() : - X86Fault("fake data tlb fault", "dtlb") -#endif {} -#if !FULL_SYSTEM void invoke(ThreadContext * tc); -#endif }; }; diff --git a/src/arch/x86/pagetable.hh b/src/arch/x86/pagetable.hh index cc614168c..e42693c03 100644 --- a/src/arch/x86/pagetable.hh +++ b/src/arch/x86/pagetable.hh @@ -62,16 +62,26 @@ #include <string> #include "sim/host.hh" +#include "base/bitunion.hh" #include "base/misc.hh" class Checkpoint; namespace X86ISA { - struct VAddr - { - VAddr(Addr a) { panic("not implemented yet."); } - }; + BitUnion64(VAddr) + Bitfield<20, 12> longl1; + Bitfield<29, 21> longl2; + Bitfield<38, 30> longl3; + Bitfield<47, 39> longl4; + + Bitfield<20, 12> pael1; + Bitfield<29, 21> pael2; + Bitfield<31, 30> pael3; + + Bitfield<21, 12> norml1; + Bitfield<31, 22> norml2; + EndBitUnion(VAddr) struct TlbEntry { diff --git a/src/arch/x86/tlb.cc b/src/arch/x86/tlb.cc index e30e820b4..704ab3027 100644 --- a/src/arch/x86/tlb.cc +++ b/src/arch/x86/tlb.cc @@ -64,6 +64,7 @@ #include "arch/x86/x86_traits.hh" #include "base/bitfield.hh" #include "base/trace.hh" +#include "config/full_system.hh" #include "cpu/thread_context.hh" #include "cpu/base.hh" #include "mem/packet_access.hh" @@ -72,7 +73,11 @@ namespace X86ISA { +#if FULL_SYSTEM TLB::TLB(const Params *p) : MemObject(p), walker(name(), this), size(p->size) +#else +TLB::TLB(const Params *p) : MemObject(p), size(p->size) +#endif { tlb = new TlbEntry[size]; std::memset(tlb, 0, sizeof(TlbEntry) * size); @@ -81,91 +86,377 @@ TLB::TLB(const Params *p) : MemObject(p), walker(name(), this), size(p->size) freeList.push_back(&tlb[x]); } -bool -TLB::Walker::doNext(uint64_t data, PacketPtr &write) +#if FULL_SYSTEM + +// Unfortunately, the 
placement of the base field in a page table entry is +// very erratic and would make a mess here. It might be moved here at some +// point in the future. +BitUnion64(PageTableEntry) + Bitfield<63> nx; + Bitfield<11, 9> avl; + Bitfield<8> g; + Bitfield<7> ps; + Bitfield<6> d; + Bitfield<5> a; + Bitfield<4> pcd; + Bitfield<3> pwt; + Bitfield<2> u; + Bitfield<1> w; + Bitfield<0> p; +EndBitUnion(PageTableEntry) + +void +TLB::Walker::doNext(PacketPtr &read, PacketPtr &write) { assert(state != Ready && state != Waiting); write = NULL; + PageTableEntry pte; + if (size == 8) + pte = read->get<uint64_t>(); + else + pte = read->get<uint32_t>(); + VAddr vaddr = entry.vaddr; + bool uncacheable = pte.pcd; + Addr nextRead = 0; + bool doWrite = false; + bool badNX = pte.nx && (!tlb->allowNX || !enableNX); switch(state) { case LongPML4: + nextRead = ((uint64_t)pte & (mask(40) << 12)) + vaddr.longl3 * size; + doWrite = !pte.a; + pte.a = 1; + entry.writable = pte.w; + entry.user = pte.u; + if (badNX) + panic("NX violation!\n"); + entry.noExec = pte.nx; + if (!pte.p) + panic("Page not present!\n"); nextState = LongPDP; break; case LongPDP: + nextRead = ((uint64_t)pte & (mask(40) << 12)) + vaddr.longl2 * size; + doWrite = !pte.a; + pte.a = 1; + entry.writable = entry.writable && pte.w; + entry.user = entry.user && pte.u; + if (badNX) + panic("NX violation!\n"); + if (!pte.p) + panic("Page not present!\n"); nextState = LongPD; break; case LongPD: - nextState = LongPTE; - break; + doWrite = !pte.a; + pte.a = 1; + entry.writable = entry.writable && pte.w; + entry.user = entry.user && pte.u; + if (badNX) + panic("NX violation!\n"); + if (!pte.p) + panic("Page not present!\n"); + if (!pte.ps) { + // 4 KB page + entry.size = 4 * (1 << 10); + nextRead = + ((uint64_t)pte & (mask(40) << 12)) + vaddr.longl1 * size; + nextState = LongPTE; + break; + } else { + // 2 MB page + entry.size = 2 * (1 << 20); + entry.paddr = (uint64_t)pte & (mask(31) << 21); + entry.uncacheable = uncacheable; + entry.global = pte.g; + entry.patBit = bits(pte, 12); + entry.vaddr = entry.vaddr & ~((2 * (1 << 20)) - 1); + tlb->insert(entry.vaddr, entry); + nextState = Ready; + delete read->req; + delete read; + read = NULL; + return; + } case LongPTE: + doWrite = !pte.a; + pte.a = 1; + entry.writable = entry.writable && pte.w; + entry.user = entry.user && pte.u; + if (badNX) + panic("NX violation!\n"); + if (!pte.p) + panic("Page not present!\n"); + entry.paddr = (uint64_t)pte & (mask(40) << 12); + entry.uncacheable = uncacheable; + entry.global = pte.g; + entry.patBit = bits(pte, 12); + entry.vaddr = entry.vaddr & ~((4 * (1 << 10)) - 1); + tlb->insert(entry.vaddr, entry); nextState = Ready; - return false; + delete read->req; + delete read; + read = NULL; + return; case PAEPDP: + nextRead = ((uint64_t)pte & (mask(40) << 12)) + vaddr.pael2 * size; + if (!pte.p) + panic("Page not present!\n"); nextState = PAEPD; break; case PAEPD: - break; + doWrite = !pte.a; + pte.a = 1; + entry.writable = pte.w; + entry.user = pte.u; + if (badNX) + panic("NX violation!\n"); + if (!pte.p) + panic("Page not present!\n"); + if (!pte.ps) { + // 4 KB page + entry.size = 4 * (1 << 10); + nextRead = ((uint64_t)pte & (mask(40) << 12)) + vaddr.pael1 * size; + nextState = PAEPTE; + break; + } else { + // 2 MB page + entry.size = 2 * (1 << 20); + entry.paddr = (uint64_t)pte & (mask(31) << 21); + entry.uncacheable = uncacheable; + entry.global = pte.g; + entry.patBit = bits(pte, 12); + entry.vaddr = entry.vaddr & ~((2 * (1 << 20)) - 1); + tlb->insert(entry.vaddr, entry); + 
nextState = Ready; + delete read->req; + delete read; + read = NULL; + return; + } case PAEPTE: + doWrite = !pte.a; + pte.a = 1; + entry.writable = entry.writable && pte.w; + entry.user = entry.user && pte.u; + if (badNX) + panic("NX violation!\n"); + if (!pte.p) + panic("Page not present!\n"); + entry.paddr = (uint64_t)pte & (mask(40) << 12); + entry.uncacheable = uncacheable; + entry.global = pte.g; + entry.patBit = bits(pte, 7); + entry.vaddr = entry.vaddr & ~((4 * (1 << 10)) - 1); + tlb->insert(entry.vaddr, entry); nextState = Ready; - return false; + delete read->req; + delete read; + read = NULL; + return; case PSEPD: - break; + doWrite = !pte.a; + pte.a = 1; + entry.writable = pte.w; + entry.user = pte.u; + if (!pte.p) + panic("Page not present!\n"); + if (!pte.ps) { + // 4 KB page + entry.size = 4 * (1 << 10); + nextRead = + ((uint64_t)pte & (mask(20) << 12)) + vaddr.norml2 * size; + nextState = PTE; + break; + } else { + // 4 MB page + entry.size = 4 * (1 << 20); + entry.paddr = bits(pte, 20, 13) << 32 | bits(pte, 31, 22) << 22; + entry.uncacheable = uncacheable; + entry.global = pte.g; + entry.patBit = bits(pte, 12); + entry.vaddr = entry.vaddr & ~((4 * (1 << 20)) - 1); + tlb->insert(entry.vaddr, entry); + nextState = Ready; + delete read->req; + delete read; + read = NULL; + return; + } case PD: + doWrite = !pte.a; + pte.a = 1; + entry.writable = pte.w; + entry.user = pte.u; + if (!pte.p) + panic("Page not present!\n"); + // 4 KB page + entry.size = 4 * (1 << 10); + nextRead = ((uint64_t)pte & (mask(20) << 12)) + vaddr.norml2 * size; + nextState = PTE; + break; nextState = PTE; break; case PTE: + doWrite = !pte.a; + pte.a = 1; + entry.writable = pte.w; + entry.user = pte.u; + if (!pte.p) + panic("Page not present!\n"); + entry.paddr = (uint64_t)pte & (mask(20) << 12); + entry.uncacheable = uncacheable; + entry.global = pte.g; + entry.patBit = bits(pte, 7); + entry.vaddr = entry.vaddr & ~((4 * (1 << 10)) - 1); + tlb->insert(entry.vaddr, entry); nextState = Ready; - return false; + delete read->req; + delete read; + read = NULL; + return; default: panic("Unknown page table walker state %d!\n"); } - return true; + PacketPtr oldRead = read; + //If we didn't return, we're setting up another read. + uint32_t flags = oldRead->req->getFlags(); + if (uncacheable) + flags |= UNCACHEABLE; + else + flags &= ~UNCACHEABLE; + RequestPtr request = + new Request(nextRead, oldRead->getSize(), flags); + read = new Packet(request, MemCmd::ReadExReq, Packet::Broadcast); + read->allocate(); + //If we need to write, adjust the read packet to write the modified value + //back to memory. + if (doWrite) { + write = oldRead; + write->set<uint64_t>(pte); + write->cmd = MemCmd::WriteReq; + write->setDest(Packet::Broadcast); + } else { + write = NULL; + delete oldRead->req; + delete oldRead; + } } void -TLB::Walker::buildReadPacket(Addr addr) +TLB::Walker::start(ThreadContext * _tc, Addr vaddr) { - readRequest.setPhys(addr, size, PHYSICAL | uncachable ? UNCACHEABLE : 0); - readPacket.reinitFromRequest(); + assert(state == Ready); + assert(!tc); + tc = _tc; + + VAddr addr = vaddr; + + //Figure out what we're doing. + CR3 cr3 = tc->readMiscRegNoEffect(MISCREG_CR3); + Addr top = 0; + // Check if we're in long mode or not + Efer efer = tc->readMiscRegNoEffect(MISCREG_EFER); + size = 8; + if (efer.lma) { + // Do long mode. + state = LongPML4; + top = (cr3.longPdtb << 12) + addr.longl4 * size; + } else { + // We're in some flavor of legacy mode. 
+ CR4 cr4 = tc->readMiscRegNoEffect(MISCREG_CR4); + if (cr4.pae) { + // Do legacy PAE. + state = PAEPDP; + top = (cr3.paePdtb << 5) + addr.pael3 * size; + } else { + size = 4; + top = (cr3.pdtb << 12) + addr.norml2 * size; + if (cr4.pse) { + // Do legacy PSE. + state = PSEPD; + } else { + // Do legacy non PSE. + state = PD; + } + } + } + nextState = Ready; + entry.vaddr = vaddr; + + enableNX = efer.nxe; + + RequestPtr request = + new Request(top, size, PHYSICAL | cr3.pcd ? UNCACHEABLE : 0); + read = new Packet(request, MemCmd::ReadExReq, Packet::Broadcast); + read->allocate(); + Enums::MemoryMode memMode = tlb->sys->getMemoryMode(); + if (memMode == Enums::timing) { + tc->suspend(); + port.sendTiming(read); + } else if (memMode == Enums::atomic) { + do { + port.sendAtomic(read); + PacketPtr write = NULL; + doNext(read, write); + state = nextState; + nextState = Ready; + if (write) + port.sendAtomic(write); + } while(read); + tc = NULL; + state = Ready; + nextState = Waiting; + } else { + panic("Unrecognized memory system mode.\n"); + } } -TLB::walker::buildWritePacket(Addr addr) +bool +TLB::Walker::WalkerPort::recvTiming(PacketPtr pkt) { - writeRequest.setPhys(addr, size, PHYSICAL | uncachable ? UNCACHEABLE : 0); - writePacket.reinitFromRequest(); + return walker->recvTiming(pkt); +} bool -TLB::Walker::WalkerPort::recvTiming(PacketPtr pkt) +TLB::Walker::recvTiming(PacketPtr pkt) { + inflight--; if (pkt->isResponse() && !pkt->wasNacked()) { if (pkt->isRead()) { - assert(packet); - assert(walker->state == Waiting); - packet = NULL; - walker->state = walker->nextState; - walker->nextState = Ready; - PacketPtr write; - if (walker->doNext(pkt, write)) { - packet = &walker->packet; - port->sendTiming(packet); - } + assert(inflight); + assert(state == Waiting); + assert(!read); + state = nextState; + nextState = Ready; + PacketPtr write = NULL; + doNext(pkt, write); + state = Waiting; + read = pkt; if (write) { writes.push_back(write); } - while (!port->blocked() && writes.size()) { - if (port->sendTiming(writes.front())) { - writes.pop_front(); - outstandingWrites++; - } - } + sendPackets(); } else { - outstandingWrites--; + sendPackets(); + } + if (inflight == 0 && read == NULL && writes.size() == 0) { + tc->activate(0); + tc = NULL; + state = Ready; + nextState = Waiting; } } else if (pkt->wasNacked()) { pkt->reinitNacked(); - if (!sendTiming(pkt)) { + if (!port.sendTiming(pkt)) { + retrying = true; if (pkt->isWrite()) { - writes.push_front(pkt); + writes.push_back(pkt); + } else { + assert(!read); + read = pkt; } + } else { + inflight++; } } return true; @@ -200,9 +491,47 @@ TLB::Walker::WalkerPort::recvStatusChange(Status status) void TLB::Walker::WalkerPort::recvRetry() { + walker->recvRetry(); +} + +void +TLB::Walker::recvRetry() +{ retrying = false; - if (!sendTiming(packet)) { - retrying = true; + sendPackets(); +} + +void +TLB::Walker::sendPackets() +{ + //If we're already waiting for the port to become available, just return. + if (retrying) + return; + + //Reads always have priority + if (read) { + if (!port.sendTiming(read)) { + retrying = true; + return; + } else { + inflight++; + delete read->req; + delete read; + read = NULL; + } + } + //Send off as many of the writes as we can. 
+ while (writes.size()) { + PacketPtr write = writes.back(); + if (!port.sendTiming(write)) { + retrying = true; + return; + } else { + inflight++; + delete write->req; + delete write; + writes.pop_back(); + } } } @@ -215,6 +544,16 @@ TLB::getPort(const std::string &if_name, int idx) panic("No tlb port named %s!\n", if_name); } +#else + +Port * +TLB::getPort(const std::string &if_name, int idx) +{ + panic("No tlb ports in se!\n", if_name); +} + +#endif + void TLB::insert(Addr vpn, TlbEntry &entry) { @@ -582,10 +921,12 @@ TLB::translate(RequestPtr &req, ThreadContext *tc, bool write, bool execute) // If protected mode has been enabled... if (cr0.pe) { + DPRINTF(TLB, "In protected mode.\n"); Efer efer = tc->readMiscRegNoEffect(MISCREG_EFER); SegAttr csAttr = tc->readMiscRegNoEffect(MISCREG_CS_ATTR); // If we're not in 64-bit mode, do protection/limit checks if (!efer.lma || !csAttr.longMode) { + DPRINTF(TLB, "Not in long mode. Checking segment protection.\n"); SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg)); if (!attr.writable && write) return new GeneralProtection(0); @@ -594,6 +935,7 @@ TLB::translate(RequestPtr &req, ThreadContext *tc, bool write, bool execute) Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg)); Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg)); if (!attr.expandDown) { + DPRINTF(TLB, "Checking an expand down segment.\n"); // We don't have to worry about the access going around the // end of memory because accesses will be broken up into // pieces at boundaries aligned on sizes smaller than an @@ -618,25 +960,28 @@ TLB::translate(RequestPtr &req, ThreadContext *tc, bool write, bool execute) } // If paging is enabled, do the translation. if (cr0.pg) { + DPRINTF(TLB, "Paging enabled.\n"); // The vaddr already has the segment base applied. TlbEntry *entry = lookup(vaddr); if (!entry) { -#if FULL_SYSTEM - return new TlbFault(); -#else return new TlbFault(vaddr); -#endif } else { // Do paging protection checks. - Addr paddr = entry->paddr | (vaddr & mask(12)); + DPRINTF(TLB, "Entry found with paddr %#x, doing protection checks.\n", entry->paddr); + Addr paddr = entry->paddr | (vaddr & (entry->size-1)); + DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr); req->setPaddr(paddr); } } else { //Use the address which already has segmentation applied. + DPRINTF(TLB, "Paging disabled.\n"); + DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr); req->setPaddr(vaddr); } } else { // Real mode + DPRINTF(TLB, "In real mode.\n"); + DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr); req->setPaddr(vaddr); } return NoFault; diff --git a/src/arch/x86/tlb.hh b/src/arch/x86/tlb.hh index 726c25374..d45f94520 100644 --- a/src/arch/x86/tlb.hh +++ b/src/arch/x86/tlb.hh @@ -59,6 +59,7 @@ #define __ARCH_X86_TLB_HH__ #include <list> +#include <vector> #include <string> #include "arch/x86/pagetable.hh" @@ -88,6 +89,8 @@ namespace X86ISA System * sys; + bool allowNX; + public: typedef X86TLBParams Params; TLB(const Params *p); @@ -116,65 +119,55 @@ namespace X86ISA PTE }; - // Act on the current state and determine what to do next. If the - // walker has finished updating the TLB, this will return false. - bool doNext(PacketPtr read, PacketPtr &write); - - // This does an actual load to feed the walker. If we're in - // atomic mode, this will drive the state machine itself until - // the TLB is filled. If we're in timing mode, the port getting - // a reply will drive the machine using this function which will - // return after starting the memory operation. 
- void doMemory(Addr addr); + // Act on the current state and determine what to do next. read + // should be the packet that just came back from a read and write + // should be NULL. When the function returns, read is either NULL + // if the machine is finished, or points to a packet to initiate + // the next read. If any write is required to update an "accessed" + // bit, write will point to a packet to do the write. Otherwise it + // will be NULL. + void doNext(PacketPtr &read, PacketPtr &write); // Kick off the state machine. - void start(bool _uncachable, Addr _vaddr, Addr cr3, State next) - { - assert(state == Ready); - state = Waiting; - nextState = next; - // If PAE isn't being used, entries are 4 bytes. Otherwise - // they're 8. - if (next == PSEPD || next == PD || next == PTE) - size = 4; - else - size = 8; - vaddr = _vaddr; - uncachable = _uncacheable; - buildPacket(cr3); - if (state == Enums::timing) { - port->sendTiming(&packet); - } else if (state == Enums::atomic) { - port->sendAtomic(&packet); - Addr addr; - while(doNext(packet.get<uint64_t>(), addr)) { - buildPacket(addr); - port->sendAtomic(&packet); - } - } else { - panic("Unrecognized memory system mode.\n"); - } - }; + void start(ThreadContext * _tc, Addr vaddr); protected: friend class TLB; + /* + * State having to do with sending packets. + */ + PacketPtr read; + std::vector<PacketPtr> writes; + + // How many memory operations are in flight. + unsigned inflight; + + bool retrying; + + /* + * Functions for dealing with packets. + */ + bool recvTiming(PacketPtr pkt); + void recvRetry(); + + void sendPackets(); + + /* + * Port for accessing memory + */ class WalkerPort : public Port { public: WalkerPort(const std::string &_name, Walker * _walker) : Port(_name, _walker->tlb), walker(_walker), - packet(NULL), snoopRangeSent(false), retrying(false) + snoopRangeSent(false) {} protected: Walker * walker; - PacketPtr packet; - vector<PacketPtr> writes; - bool snoopRangeSent; - bool retrying; bool recvTiming(PacketPtr pkt); Tick recvAtomic(PacketPtr pkt); @@ -187,46 +180,41 @@ namespace X86ISA resp.clear(); snoop = true; } - - public: - bool sendTiming(PacketPtr pkt) - { - retrying = !Port::sendTiming(pkt); - return !retrying; - } - - bool blocked() { return retrying; } }; friend class WalkerPort; WalkerPort port; - Packet packet; - Request request; - + // The TLB we're supposed to load. TLB * tlb; + /* + * State machine state. 
+ */ + ThreadContext * tc; State state; State nextState; int size; - - Addr vaddr; + bool enableNX; + TlbEntry entry; public: Walker(const std::string &_name, TLB * _tlb) : + read(NULL), inflight(0), retrying(false), port(_name + "-walker_port", this), - packet(&request, ReadExReq, Broadcast), - tlb(_tlb), state(Ready), nextState(Ready) + tlb(_tlb), + tc(NULL), state(Ready), nextState(Ready) { } - - }; Walker walker; + #endif + Port *getPort(const std::string &if_name, int idx = -1); + protected: int size; @@ -236,8 +224,6 @@ namespace X86ISA EntryList freeList; EntryList entryList; - Port *getPort(const std::string &if_name, int idx = -1); - void insert(Addr vpn, TlbEntry &entry); void invalidateAll(); @@ -262,6 +248,8 @@ namespace X86ISA typedef X86ITBParams Params; ITB(const Params *p) : TLB(p) { + sys = p->system; + allowNX = false; } Fault translate(RequestPtr &req, ThreadContext *tc); @@ -275,6 +263,8 @@ namespace X86ISA typedef X86DTBParams Params; DTB(const Params *p) : TLB(p) { + sys = p->system; + allowNX = true; } Fault translate(RequestPtr &req, ThreadContext *tc, bool write); #if FULL_SYSTEM diff --git a/src/cpu/BaseCPU.py b/src/cpu/BaseCPU.py index 1af30a532..cb5793e57 100644 --- a/src/cpu/BaseCPU.py +++ b/src/cpu/BaseCPU.py @@ -100,7 +100,7 @@ class BaseCPU(SimObject): _mem_ports = [] - if build_env['TARGET_ISA'] == 'x86': + if build_env['TARGET_ISA'] == 'x86' and build_env['FULL_SYSTEM']: itb.walker_port = Port("ITB page table walker port") dtb.walker_port = Port("ITB page table walker port") _mem_ports = ["itb.walker_port", "dtb.walker_port"] @@ -117,7 +117,7 @@ class BaseCPU(SimObject): self.icache_port = ic.cpu_side self.dcache_port = dc.cpu_side self._mem_ports = ['icache.mem_side', 'dcache.mem_side'] - if build_env['TARGET_ISA'] == 'x86': + if build_env['TARGET_ISA'] == 'x86' and build_env['FULL_SYSTEM']: self._mem_ports += ["itb.walker_port", "dtb.walker_port"] def addTwoLevelCacheHierarchy(self, ic, dc, l2c): diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc index 1611a7275..98e29d8d1 100644 --- a/src/cpu/simple/base.cc +++ b/src/cpu/simple/base.cc @@ -466,9 +466,9 @@ BaseSimpleCPU::advancePC(Fault fault) if (fault != NoFault) { curMacroStaticInst = StaticInst::nullStaticInstPtr; predecoder.reset(); - fault->invoke(tc); thread->setMicroPC(0); thread->setNextMicroPC(1); + fault->invoke(tc); } else { //If we're at the last micro op for this instruction if (curStaticInst && curStaticInst->isLastMicroop()) { |
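The core of the new walker in tlb.cc is the per-level step in doNext(): it extracts the next table's base from the current entry, adds the next-level index scaled by the entry size, and schedules a write-back only when the accessed bit was clear. Below is a minimal sketch of that step under the same bit layout the patch uses (base field taken as mask(40) << 12, accessed bit at bit 5); walkStep, StepResult, and the local mask helper are hypothetical stand-ins, not gem5 code.

```cpp
// Sketch of one walk level: compute the next table-entry address and
// decide whether the "accessed" bit must be written back to memory.
#include <cstdint>

static inline uint64_t mask(int n)
{
    return (n >= 64) ? ~0ULL : ((1ULL << n) - 1);
}

struct StepResult {
    uint64_t nextRead;   // physical address of the next table entry
    bool     doWrite;    // true if the accessed bit needs a write-back
    uint64_t updatedPte; // entry value with the accessed bit set
};

StepResult walkStep(uint64_t pte, uint64_t index, unsigned entrySize)
{
    StepResult r;
    // Long/PAE entries keep the next-level base in bits 51..12; the patch
    // extracts it the same way with (uint64_t)pte & (mask(40) << 12).
    r.nextRead   = (pte & (mask(40) << 12)) + index * entrySize;
    // Bit 5 is the accessed (a) bit; only dirty the entry if it was clear,
    // which mirrors "doWrite = !pte.a; pte.a = 1;" in the diff.
    r.doWrite    = !(pte & (1ULL << 5));
    r.updatedPte = pte | (1ULL << 5);
    return r;
}
```

In timing mode the walker issues the write returned by this kind of step as a separate packet while the next read proceeds, which is why doNext() in the patch hands back both a read and an optional write packet.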