author | Andreas Hansson <andreas.hansson@arm.com> | 2012-05-01 13:40:42 -0400
committer | Andreas Hansson <andreas.hansson@arm.com> | 2012-05-01 13:40:42 -0400
commit | 3fea59e1629f5dac55a7d36752e822bee7fd7fa7 (patch)
tree | 5fd0076b5920a217f8463c66be3df9effe8e4324 /src/cpu
parent | 8966e6d36d17acce3ddac13b309eeb12c7711f27 (diff)
download | gem5-3fea59e1629f5dac55a7d36752e822bee7fd7fa7.tar.xz
MEM: Separate requests and responses for timing accesses
This patch moves send/recvTiming and send/recvTimingSnoop from the
Port base class to MasterPort and SlavePort, and splits them into
separate member functions for requests and responses:
send/recvTimingReq and send/recvTimingResp, plus
send/recvTimingSnoopReq and send/recvTimingSnoopResp. A master port
sends requests and receives responses; it also receives snoop
requests and sends snoop responses. A slave port has the reciprocal
behaviour: it receives requests and sends responses, and sends snoop
requests and receives snoop responses.
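To make the direction of each accessor easier to see, here is a
minimal sketch of the split interface. It is illustrative only and
does not reproduce the actual MasterPort/SlavePort declarations in
src/mem/port.hh; the Demo* names, the bare peer pointers, and the
bool results on the response/snoop-response paths (mirroring the old
sendTiming convention, where false means "retry later") are
assumptions made for brevity.

class Packet;
typedef Packet *PacketPtr;

class DemoSlavePort;

// A master port initiates requests and snoop responses, and receives
// responses and snoop requests.
class DemoMasterPort
{
  public:
    bool sendTimingReq(PacketPtr pkt);        // request towards the slave
    bool sendTimingSnoopResp(PacketPtr pkt);  // answer to a snoop request

    virtual bool recvTimingResp(PacketPtr pkt) = 0;     // response comes back
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0; // snoop from the slave

  protected:
    DemoSlavePort *peer;  // the slave port this master port is bound to
};

// A slave port has the reciprocal roles: it receives requests and snoop
// responses, and sends responses and snoop requests.
class DemoSlavePort
{
  public:
    bool sendTimingResp(PacketPtr pkt);      // response towards the master
    void sendTimingSnoopReq(PacketPtr pkt);  // snoop request, cannot be refused

    virtual bool recvTimingReq(PacketPtr pkt) = 0;
    virtual bool recvTimingSnoopResp(PacketPtr pkt) = 0;

  protected:
    DemoMasterPort *peer;  // the master port this slave port is bound to
};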
For all MemObjects that have only master ports or slave ports (but not
both), e.g. a CPU or a PIO device, this patch merely adds clarity
about what kind of access is taking place. For example, a CPU port
used to call sendTiming, and will now call sendTimingReq. Similarly, a
response previously came back through recvTiming, which is now
recvTimingResp. For the modules that have both master and slave ports,
e.g. the bus, the behaviour previously relied on branches based on
pkt->isRequest(), and these are now replaced with a direct call to the
appropriate member function depending on the type of access. Please
note that send/recvRetry is still shared by all the timing accessors
and remains in the Port base class for now (to maintain the current
bus functionality and avoid changing the statistics of all
regressions).
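The following sketch illustrates the call-site change for a module
with both master and slave ports. It is a hypothetical rendering, not
the real gem5 bus code: DemoBus, DemoPacket and the forwardToSlaves/
forwardToMaster helpers are invented for this example, and in the real
tree the receive hooks live on the bus's port objects rather than on
the bus itself.

#include <cassert>

// Stand-ins for illustration; not the real gem5 classes.
class DemoPacket
{
  public:
    bool isRequest() const;
    bool isResponse() const;
};
typedef DemoPacket *DemoPacketPtr;

struct DemoBus
{
    bool forwardToSlaves(DemoPacketPtr pkt);  // hypothetical: route a request onwards
    bool forwardToMaster(DemoPacketPtr pkt);  // hypothetical: route a response back

    // Before: one entry point, behaviour chosen by inspecting the packet.
    bool recvTimingOld(DemoPacketPtr pkt)
    {
        if (pkt->isRequest())
            return forwardToSlaves(pkt);
        return forwardToMaster(pkt);
    }

    // After: the member function that is called already identifies the
    // kind of access, so no branch on pkt->isRequest() is needed.
    bool recvTimingReq(DemoPacketPtr pkt)
    {
        assert(pkt->isRequest());
        return forwardToSlaves(pkt);
    }

    bool recvTimingResp(DemoPacketPtr pkt)
    {
        assert(pkt->isResponse());
        return forwardToMaster(pkt);
    }
};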
The packet queue is split into MasterPort and SlavePort versions to
facilitate the use of the new timing accessors. All uses of the
PacketQueue are updated accordingly.
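As a rough illustration of why the split helps, the sketch below shows
a shared buffering base class with one thin subclass per port type,
each draining through the accessor that matches its direction. The
class and member names are invented for this example; the actual
packet queue code in the gem5 tree is more involved.

#include <deque>

class Packet;
typedef Packet *PacketPtr;
class DemoMasterPort;
class DemoSlavePort;

// Common buffering logic, independent of the packet direction.
class DemoPacketQueueBase
{
  protected:
    std::deque<PacketPtr> deferred;  // packets waiting to be sent

    // Subclasses decide which send accessor to invoke.
    virtual bool trySend(PacketPtr pkt) = 0;

  public:
    virtual ~DemoPacketQueueBase() {}

    void drain()
    {
        // Keep sending until the peer asks us to retry later.
        while (!deferred.empty() && trySend(deferred.front()))
            deferred.pop_front();
    }
};

// Queue attached to a master port: it sends requests.
class DemoMasterPacketQueue : public DemoPacketQueueBase
{
    DemoMasterPort &port;
  public:
    DemoMasterPacketQueue(DemoMasterPort &p) : port(p) {}
    bool trySend(PacketPtr pkt);  // would call port.sendTimingReq(pkt)
};

// Queue attached to a slave port: it sends responses.
class DemoSlavePacketQueue : public DemoPacketQueueBase
{
    DemoSlavePort &port;
  public:
    DemoSlavePacketQueue(DemoSlavePort &p) : port(p) {}
    bool trySend(PacketPtr pkt);  // would call port.sendTimingResp(pkt)
};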
With this patch, the type of packet (request or response) is now well
defined for each type of access, and the asserts on pkt->isRequest()
and pkt->isResponse() have moved into the appropriate send member
functions. It is also worth noting that sendTimingSnoopReq no longer
returns a boolean, as the semantics do not allow snoop requests to be
rejected or stalled. All these assumptions are now explicitly part of
the port interface itself.
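Continuing the earlier sketch, the send accessors are where those
checks can now live. This is again a hypothetical rendering rather
than the code in src/mem/port.hh: the Demo* names and the direct peer
call are simplifications, and only the request and snoop-request paths
are shown.

#include <cassert>

// Minimal stand-in; the real class is gem5's Packet (src/mem/packet.hh).
class DemoPacket
{
  public:
    bool isRequest() const;
    bool isResponse() const;
};
typedef DemoPacket *DemoPacketPtr;

class DemoSlavePort;

class DemoMasterPort
{
  public:
    DemoSlavePort *peer;
    bool sendTimingReq(DemoPacketPtr pkt);
    virtual void recvTimingSnoopReq(DemoPacketPtr pkt) = 0;
};

class DemoSlavePort
{
  public:
    DemoMasterPort *peer;
    void sendTimingSnoopReq(DemoPacketPtr pkt);
    virtual bool recvTimingReq(DemoPacketPtr pkt) = 0;
};

bool
DemoMasterPort::sendTimingReq(DemoPacketPtr pkt)
{
    // The packet-type sanity check sits in the interface, so receiving
    // modules no longer need their own assert(pkt->isRequest()/isResponse()).
    assert(pkt->isRequest());
    return peer->recvTimingReq(pkt);
}

void
DemoSlavePort::sendTimingSnoopReq(DemoPacketPtr pkt)
{
    // Snoop requests cannot be rejected or stalled, hence no boolean result.
    assert(pkt->isRequest());
    peer->recvTimingSnoopReq(pkt);
}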
Diffstat (limited to 'src/cpu')
25 files changed, 59 insertions, 73 deletions
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index edbec8c80..c942cad44 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -532,7 +532,7 @@ BaseCPU::traceFunctionsInternal(Addr pc)
 }
 bool
-BaseCPU::CpuPort::recvTiming(PacketPtr pkt)
+BaseCPU::CpuPort::recvTimingResp(PacketPtr pkt)
 {
 panic("BaseCPU doesn't expect recvTiming!\n");
 return true;
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index f94c5e0a4..5d88e064b 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -133,7 +133,7 @@ class BaseCPU : public MemObject
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
 virtual void recvRetry();
diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
index 04176c54f..7e75dfbb8 100644
--- a/src/cpu/inorder/cpu.cc
+++ b/src/cpu/inorder/cpu.cc
@@ -88,10 +88,8 @@ InOrderCPU::CachePort::CachePort(CacheUnit *_cacheUnit) :
 { }
 bool
-InOrderCPU::CachePort::recvTiming(Packet *pkt)
+InOrderCPU::CachePort::recvTimingResp(Packet *pkt)
 {
- assert(pkt->isResponse());
-
 if (pkt->isError())
 DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n", pkt->getAddr());
diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh
index 06d733d85..bb52c6023 100644
--- a/src/cpu/inorder/cpu.hh
+++ b/src/cpu/inorder/cpu.hh
@@ -170,13 +170,13 @@ class InOrderCPU : public BaseCPU
 protected:
 /** Timing version of receive */
- bool recvTiming(PacketPtr pkt);
+ bool recvTimingResp(PacketPtr pkt);
 /** Handles doing a retry of a failed timing request. */
 void recvRetry();
 /** Ignoring snoops for now. */
- bool recvTimingSnoop(PacketPtr pkt) { return true; }
+ void recvTimingSnoopReq(PacketPtr pkt) { }
 };
 /** Define TickEvent for the CPU */
diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc
index a5bb9cd24..a4dc23d47 100644
--- a/src/cpu/inorder/resources/cache_unit.cc
+++ b/src/cpu/inorder/resources/cache_unit.cc
@@ -873,7 +873,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
 tid, inst->seqNum, cache_req->dataPkt->getAddr());
 if (do_access) {
- if (!cachePort->sendTiming(cache_req->dataPkt)) {
+ if (!cachePort->sendTimingReq(cache_req->dataPkt)) {
 DPRINTF(InOrderCachePort,
 "[tid:%i] [sn:%i] cannot access cache, because port "
 "is blocked. now waiting to retry request\n", tid,
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index fe70c3fcf..e8fc968b7 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -87,9 +87,8 @@ BaseO3CPU::regStats()
 template<class Impl>
 bool
-FullO3CPU<Impl>::IcachePort::recvTiming(PacketPtr pkt)
+FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
 DPRINTF(O3CPU, "Fetch unit received timing\n");
 // We shouldn't ever get a block in ownership state
 assert(!(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
@@ -107,18 +106,16 @@ FullO3CPU<Impl>::IcachePort::recvRetry()
 template <class Impl>
 bool
-FullO3CPU<Impl>::DcachePort::recvTiming(PacketPtr pkt)
+FullO3CPU<Impl>::DcachePort::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
- return lsq->recvTiming(pkt);
+ return lsq->recvTimingResp(pkt);
 }
 template <class Impl>
-bool
-FullO3CPU<Impl>::DcachePort::recvTimingSnoop(PacketPtr pkt)
+void
+FullO3CPU<Impl>::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
 {
- assert(pkt->isRequest());
- return lsq->recvTimingSnoop(pkt);
+ lsq->recvTimingSnoopReq(pkt);
 }
 template <class Impl>
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index be51f415f..41128110b 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -148,8 +148,8 @@ class FullO3CPU : public BaseO3CPU
 /** Timing version of receive. Handles setting fetch to the
 * proper status to start fetching. */
- virtual bool recvTiming(PacketPtr pkt);
- virtual bool recvTimingSnoop(PacketPtr pkt) { return true; }
+ virtual bool recvTimingResp(PacketPtr pkt);
+ virtual void recvTimingSnoopReq(PacketPtr pkt) { }
 /** Handles doing a retry of a failed fetch. */
 virtual void recvRetry();
@@ -176,8 +176,8 @@ class FullO3CPU : public BaseO3CPU
 /** Timing version of receive. Handles writing back and
 * completing the load or store that has returned from
 * memory. */
- virtual bool recvTiming(PacketPtr pkt);
- virtual bool recvTimingSnoop(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
+ virtual void recvTimingSnoopReq(PacketPtr pkt);
 /** Handles doing a retry of the previous send. */
 virtual void recvRetry();
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index 2480211e4..f4ce77f22 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -621,7 +621,7 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
 fetchedCacheLines++;
 // Access the cache.
- if (!cpu->getInstPort().sendTiming(data_pkt)) {
+ if (!cpu->getInstPort().sendTimingReq(data_pkt)) {
 assert(retryPkt == NULL);
 assert(retryTid == InvalidThreadID);
 DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
@@ -1356,7 +1356,7 @@ DefaultFetch<Impl>::recvRetry()
 assert(retryTid != InvalidThreadID);
 assert(fetchStatus[retryTid] == IcacheWaitRetry);
- if (cpu->getInstPort().sendTiming(retryPkt)) {
+ if (cpu->getInstPort().sendTimingReq(retryPkt)) {
 fetchStatus[retryTid] = IcacheWaitResponse;
 retryPkt = NULL;
 retryTid = InvalidThreadID;
diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh
index dac5fab18..026033539 100644
--- a/src/cpu/o3/lsq.hh
+++ b/src/cpu/o3/lsq.hh
@@ -297,9 +297,9 @@ class LSQ {
 *
 * @param pkt Response packet from the memory sub-system
 */
- bool recvTiming(PacketPtr pkt);
+ bool recvTimingResp(PacketPtr pkt);
- bool recvTimingSnoop(PacketPtr pkt);
+ void recvTimingSnoopReq(PacketPtr pkt);
 /** The CPU pointer. */
 O3CPU *cpu;
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index c2f410e37..72ffdd58b 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -319,9 +319,8 @@ LSQ<Impl>::recvRetry()
 template <class Impl>
 bool
-LSQ<Impl>::recvTiming(PacketPtr pkt)
+LSQ<Impl>::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
 if (pkt->isError())
 DPRINTF(LSQ, "Got error packet back for address: %#X\n", pkt->getAddr());
@@ -330,10 +329,9 @@ LSQ<Impl>::recvTiming(PacketPtr pkt)
 }
 template <class Impl>
-bool
-LSQ<Impl>::recvTimingSnoop(PacketPtr pkt)
+void
+LSQ<Impl>::recvTimingSnoopReq(PacketPtr pkt)
 {
- assert(pkt->isRequest());
 DPRINTF(LSQ, "received pkt for addr:%#x %s\n", pkt->getAddr(), pkt->cmdString());
@@ -345,9 +343,6 @@ LSQ<Impl>::recvTimingSnoop(PacketPtr pkt)
 thread[tid].checkSnoop(pkt);
 }
 }
-
- // to provide stronger consistency model
- return true;
 }
 template<class Impl>
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index 44c3df0bf..ad1e26d2f 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -801,7 +801,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
 state->mainPkt = data_pkt;
 }
- if (!dcachePort->sendTiming(fst_data_pkt)) {
+ if (!dcachePort->sendTimingReq(fst_data_pkt)) {
 // Delete state and data packet because a load retry
 // initiates a pipeline restart; it does not retry.
 delete state;
@@ -830,7 +830,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
 // The first packet will return in completeDataAccess and be
 // handled there.
 ++usedPorts;
- if (!dcachePort->sendTiming(snd_data_pkt)) {
+ if (!dcachePort->sendTimingReq(snd_data_pkt)) {
 // The main packet will be deleted in completeDataAccess.
 delete snd_data_pkt->req;
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index f4182e30d..4f82ad9e3 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -1180,7 +1180,7 @@ template <class Impl>
 bool
 LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
 {
- if (!dcachePort->sendTiming(data_pkt)) {
+ if (!dcachePort->sendTimingReq(data_pkt)) {
 // Need to handle becoming blocked on a store.
 isStoreBlocked = true;
 ++lsqCacheBlocked;
@@ -1203,7 +1203,7 @@ LSQUnit<Impl>::recvRetry()
 LSQSenderState *state = dynamic_cast<LSQSenderState *>(retryPkt->senderState);
- if (dcachePort->sendTiming(retryPkt)) {
+ if (dcachePort->sendTimingReq(retryPkt)) {
 // Don't finish the store unless this is the last packet.
 if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
 state->pendingPacket == retryPkt) {
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 5dba51842..3d771e56b 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -234,7 +234,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
 new IprEvent(pkt, this, nextCycle(curTick() + delay));
 _status = DcacheWaitResponse;
 dcache_pkt = NULL;
- } else if (!dcachePort.sendTiming(pkt)) {
+ } else if (!dcachePort.sendTimingReq(pkt)) {
 _status = DcacheRetry;
 dcache_pkt = pkt;
 } else {
@@ -449,7 +449,7 @@ TimingSimpleCPU::handleWritePacket()
 new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
 _status = DcacheWaitResponse;
 dcache_pkt = NULL;
- } else if (!dcachePort.sendTiming(dcache_pkt)) {
+ } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
 _status = DcacheRetry;
 } else {
 _status = DcacheWaitResponse;
@@ -581,7 +581,7 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
 ifetch_pkt->dataStatic(&inst);
 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
- if (!icachePort.sendTiming(ifetch_pkt)) {
+ if (!icachePort.sendTimingReq(ifetch_pkt)) {
 // Need to wait for retry
 _status = IcacheRetry;
 } else {
@@ -715,9 +715,8 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
 }
 bool
-TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
+TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
 if (!pkt->wasNacked()) {
 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
 // delay processing of returned data until next CPU clock edge
@@ -732,7 +731,7 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
 } else {
 assert(cpu->_status == IcacheWaitResponse);
 pkt->reinitNacked();
- if (!sendTiming(pkt)) {
+ if (!sendTimingReq(pkt)) {
 cpu->_status = IcacheRetry;
 cpu->ifetch_pkt = pkt;
 }
@@ -749,7 +748,7 @@ TimingSimpleCPU::IcachePort::recvRetry()
 assert(cpu->ifetch_pkt != NULL);
 assert(cpu->_status == IcacheRetry);
 PacketPtr tmp = cpu->ifetch_pkt;
- if (sendTiming(tmp)) {
+ if (sendTimingReq(tmp)) {
 cpu->_status = IcacheWaitResponse;
 cpu->ifetch_pkt = NULL;
 }
@@ -836,9 +835,8 @@ TimingSimpleCPU::completeDrain()
 }
 bool
-TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
+TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
 if (!pkt->wasNacked()) {
 // delay processing of returned data until next CPU clock edge
 Tick next_tick = cpu->nextCycle(curTick());
@@ -862,7 +860,7 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
 } else {
 assert(cpu->_status == DcacheWaitResponse);
 pkt->reinitNacked();
- if (!sendTiming(pkt)) {
+ if (!sendTimingReq(pkt)) {
 cpu->_status = DcacheRetry;
 cpu->dcache_pkt = pkt;
 }
@@ -896,7 +894,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
 assert(main_send_state);
- if (sendTiming(tmp)) {
+ if (sendTimingReq(tmp)) {
 // If we were able to send without retrying, record that fact
 // and try sending the other fragment.
 send_state->clearFromParent();
@@ -914,7 +912,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
 cpu->dcache_pkt = NULL;
 }
 }
- } else if (sendTiming(tmp)) {
+ } else if (sendTimingReq(tmp)) {
 cpu->_status = DcacheWaitResponse;
 // memory system takes ownership of packet
 cpu->dcache_pkt = NULL;
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index 4c23391d9..16bb554e2 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -156,7 +156,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
 /**
 * Snooping a coherence request, do nothing.
 */
- virtual bool recvTimingSnoop(PacketPtr pkt) { return true; }
+ virtual void recvTimingSnoopReq(PacketPtr pkt) { }
 TimingSimpleCPU* cpu;
@@ -185,7 +185,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
 virtual void recvRetry();
@@ -212,7 +212,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
 virtual void recvRetry();
diff --git a/src/cpu/testers/directedtest/InvalidateGenerator.cc b/src/cpu/testers/directedtest/InvalidateGenerator.cc
index a89853061..84a90844f 100644
--- a/src/cpu/testers/directedtest/InvalidateGenerator.cc
+++ b/src/cpu/testers/directedtest/InvalidateGenerator.cc
@@ -80,7 +80,7 @@ InvalidateGenerator::initiate()
 *dummyData = 0;
 pkt->dataDynamic(dummyData);
- if (port->sendTiming(pkt)) {
+ if (port->sendTimingReq(pkt)) {
 DPRINTF(DirectedTest, "initiating request - successful\n");
 if (m_status == InvalidateGeneratorStatus_Load_Waiting) {
 m_status = InvalidateGeneratorStatus_Load_Pending;
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.cc b/src/cpu/testers/directedtest/RubyDirectedTester.cc
index b5fe662af..0aba82dd2 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.cc
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.cc
@@ -91,7 +91,7 @@ RubyDirectedTester::getMasterPort(const std::string &if_name, int idx)
 }
 bool
-RubyDirectedTester::CpuPort::recvTiming(PacketPtr pkt)
+RubyDirectedTester::CpuPort::recvTimingResp(PacketPtr pkt)
 {
 tester->hitCallback(id, pkt->getAddr());
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.hh b/src/cpu/testers/directedtest/RubyDirectedTester.hh
index 08b034d3f..75e72edf8 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.hh
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.hh
@@ -59,7 +59,7 @@ class RubyDirectedTester : public MemObject
 {}
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
 virtual void recvRetry()
 { panic("%s does not expect a retry\n", name()); }
 };
diff --git a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
index b8c42b67d..abcb0278f 100644
--- a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
+++ b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
@@ -70,7 +70,7 @@ SeriesRequestGenerator::initiate()
 *dummyData = 0;
 pkt->dataDynamic(dummyData);
- if (port->sendTiming(pkt)) {
+ if (port->sendTimingReq(pkt)) {
 DPRINTF(DirectedTest, "initiating request - successful\n");
 m_status = SeriesRequestGeneratorStatus_Request_Pending;
 return true;
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 809b4dd93..642af4677 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -53,9 +53,8 @@ using namespace std;
 int TESTER_ALLOCATOR=0;
 bool
-MemTest::CpuPort::recvTiming(PacketPtr pkt)
+MemTest::CpuPort::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
 memtest->completeRequest(pkt);
 return true;
 }
@@ -72,7 +71,7 @@ MemTest::sendPkt(PacketPtr pkt) {
 cachePort.sendAtomic(pkt);
 completeRequest(pkt);
 }
- else if (!cachePort.sendTiming(pkt)) {
+ else if (!cachePort.sendTimingReq(pkt)) {
 DPRINTF(MemTest, "accessRetry setting to true\n");
 //
@@ -379,7 +378,7 @@ MemTest::tick()
 void
 MemTest::doRetry()
 {
- if (cachePort.sendTiming(retryPkt)) {
+ if (cachePort.sendTimingReq(retryPkt)) {
 DPRINTF(MemTest, "accessRetry setting to false\n");
 accessRetry = false;
 retryPkt = NULL;
diff --git a/src/cpu/testers/memtest/memtest.hh b/src/cpu/testers/memtest/memtest.hh
index 8dccfdc80..450a3e4f1 100644
--- a/src/cpu/testers/memtest/memtest.hh
+++ b/src/cpu/testers/memtest/memtest.hh
@@ -97,9 +97,9 @@ class MemTest : public MemObject
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
- virtual bool recvTimingSnoop(PacketPtr pkt) { return true; }
+ virtual void recvTimingSnoopReq(PacketPtr pkt) { }
 virtual Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
diff --git a/src/cpu/testers/networktest/networktest.cc b/src/cpu/testers/networktest/networktest.cc
index aa8b54b8e..5d0e8e0c9 100644
--- a/src/cpu/testers/networktest/networktest.cc
+++ b/src/cpu/testers/networktest/networktest.cc
@@ -51,9 +51,8 @@ using namespace std;
 int TESTER_NETWORK=0;
 bool
-NetworkTest::CpuPort::recvTiming(PacketPtr pkt)
+NetworkTest::CpuPort::recvTimingResp(PacketPtr pkt)
 {
- assert(pkt->isResponse());
 networktest->completeRequest(pkt);
 return true;
 }
@@ -67,7 +66,7 @@ NetworkTest::CpuPort::recvRetry()
 void
 NetworkTest::sendPkt(PacketPtr pkt)
 {
- if (!cachePort.sendTiming(pkt)) {
+ if (!cachePort.sendTimingReq(pkt)) {
 retryPkt = pkt; // RubyPort will retry sending
 }
 numPacketsSent++;
@@ -269,7 +268,7 @@ NetworkTest::generatePkt()
 void
 NetworkTest::doRetry()
 {
- if (cachePort.sendTiming(retryPkt)) {
+ if (cachePort.sendTimingReq(retryPkt)) {
 retryPkt = NULL;
 }
 }
diff --git a/src/cpu/testers/networktest/networktest.hh b/src/cpu/testers/networktest/networktest.hh
index 36d311aa8..8b7a89d6f 100644
--- a/src/cpu/testers/networktest/networktest.hh
+++ b/src/cpu/testers/networktest/networktest.hh
@@ -92,7 +92,7 @@ class NetworkTest : public MemObject
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
 virtual void recvRetry();
 };
diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc
index 8188fecbb..98250f042 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -114,7 +114,7 @@ Check::initiatePrefetch()
 pkt->senderState = new SenderState(m_address, req->getSize(), pkt->senderState);
- if (port->sendTiming(pkt)) {
+ if (port->sendTimingReq(pkt)) {
 DPRINTF(RubyTest, "successfully initiated prefetch.\n");
 } else {
 // If the packet did not issue, must delete
@@ -154,7 +154,7 @@ Check::initiateFlush()
 pkt->senderState = new SenderState(m_address, req->getSize(), pkt->senderState);
- if (port->sendTiming(pkt)) {
+ if (port->sendTimingReq(pkt)) {
 DPRINTF(RubyTest, "initiating Flush - successful\n");
 }
 }
@@ -201,7 +201,7 @@ Check::initiateAction()
 pkt->senderState = new SenderState(writeAddr, req->getSize(), pkt->senderState);
- if (port->sendTiming(pkt)) {
+ if (port->sendTimingReq(pkt)) {
 DPRINTF(RubyTest, "initiating action - successful\n");
 DPRINTF(RubyTest, "status before action update: %s\n",
 (TesterStatus_to_string(m_status)).c_str());
@@ -253,7 +253,7 @@ Check::initiateCheck()
 pkt->senderState = new SenderState(m_address, req->getSize(), pkt->senderState);
- if (port->sendTiming(pkt)) {
+ if (port->sendTimingReq(pkt)) {
 DPRINTF(RubyTest, "initiating check - successful\n");
 DPRINTF(RubyTest, "status before check update: %s\n",
 TesterStatus_to_string(m_status).c_str());
diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc
index 2862a261d..3397b00d0 100644
--- a/src/cpu/testers/rubytest/RubyTester.cc
+++ b/src/cpu/testers/rubytest/RubyTester.cc
@@ -145,7 +145,7 @@ RubyTester::getMasterPort(const std::string &if_name, int idx)
 }
 bool
-RubyTester::CpuPort::recvTiming(PacketPtr pkt)
+RubyTester::CpuPort::recvTimingResp(PacketPtr pkt)
 {
 // retrieve the subblock and call hitCallback
 RubyTester::SenderState* senderState =
diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh
index 5d2202f65..8fbd886b3 100644
--- a/src/cpu/testers/rubytest/RubyTester.hh
+++ b/src/cpu/testers/rubytest/RubyTester.hh
@@ -62,7 +62,7 @@ class RubyTester : public MemObject
 {}
 protected:
- virtual bool recvTiming(PacketPtr pkt);
+ virtual bool recvTimingResp(PacketPtr pkt);
 virtual void recvRetry()
 { panic("%s does not expect a retry\n", name()); }
 };