author    Andreas Hansson <andreas.hansson@arm.com>    2015-02-03 14:25:27 -0500
committer Andreas Hansson <andreas.hansson@arm.com>    2015-02-03 14:25:27 -0500
commit    20111ba9171378bbf3bfd2f4628d7e8a0e9cbd3b (patch)
tree      4691728d05d2e5e5435501bdee73abe1bd67ba6f /src/cpu/simple
parent    3e33786db86c894b0e34bf018cbce412f9807447 (diff)
download  gem5-20111ba9171378bbf3bfd2f4628d7e8a0e9cbd3b.tar.xz
cpu: Ensure timing CPU sinks response before sending new request
This patch changes how the timing CPU processes responses: it now always schedules an event, even if it is for the current tick. This helps to avoid situations where a new request shows up before a response has finished in the crossbar, and is also more in line with realistic behaviour.
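To see the scheduling pattern in isolation, here is a minimal, self-contained sketch; the EventQueue and clockEdge below are toy stand-ins for gem5's event queue and ClockedObject, not the real classes. The handler always defers completion through an event scheduled at the next clock edge, even when that edge is the current tick, so the response is fully sunk before anything new is sent.

    // Toy model of the change: always defer response processing via an
    // event at the next clock edge, never complete it inline.
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <map>

    using Tick = std::uint64_t;

    struct EventQueue {
        Tick curTick = 0;
        std::multimap<Tick, std::function<void()>> events;

        void schedule(Tick when, std::function<void()> fn) {
            events.emplace(when, std::move(fn));
        }

        void run() {
            while (!events.empty()) {
                auto it = events.begin();
                curTick = it->first;
                auto fn = std::move(it->second);
                events.erase(it);
                fn();  // same-tick events run after the current handler
            }
        }
    };

    int main() {
        EventQueue eq;
        const Tick period = 500;
        // next clock edge at or after the current tick
        auto clockEdge = [&] {
            return (eq.curTick + period - 1) / period * period;
        };

        // A response arriving exactly on a clock edge: the old code
        // completed it inline; the new code always schedules an event,
        // which still fires at the same tick, but only after the sender
        // has finished delivering.
        eq.schedule(1000, [&] {
            std::cout << "response received at tick " << eq.curTick << "\n";
            eq.schedule(clockEdge(), [&] {
                std::cout << "response processed at tick " << eq.curTick << "\n";
            });
        });
        eq.run();
    }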
Diffstat (limited to 'src/cpu/simple')
-rw-r--r--  src/cpu/simple/timing.cc | 41
1 file changed, 18 insertions(+), 23 deletions(-)
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 6de6899e7..9171395b0 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -718,14 +718,12 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
 bool
 TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
 {
-    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
+    DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
+    // we should only ever see one response per cycle since we only
+    // issue a new request once this response is sunk
+    assert(!tickEvent.scheduled());
     // delay processing of returned data until next CPU clock edge
-    Tick next_tick = cpu->clockEdge();
-
-    if (next_tick == curTick())
-        cpu->completeIfetch(pkt);
-    else
-        tickEvent.schedule(pkt, next_tick);
+    tickEvent.schedule(pkt, cpu->clockEdge());
 
     return true;
 }
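On the fetch side the patch can go further than the data side: the CPU has at most one instruction fetch outstanding and only issues the next one after the previous response is handled, so no tickEvent can be pending here and the old conditional collapses into an assert plus an unconditional schedule. The behaviour hinges on clockEdge() returning the next clock edge at or after the current tick; roughly, and assuming a fixed clock period (gem5's ClockedObject takes the period from its clock domain):

    // Illustrative rounding only, not gem5's implementation.
    using Tick = unsigned long long;

    Tick clockEdge(Tick now, Tick period, unsigned cycles = 0)
    {
        Tick edge = (now + period - 1) / period * period;  // round up to edge
        return edge + cycles * period;                     // n cycles later
    }

When a response arrives exactly on an edge, clockEdge() equals curTick(), so the scheduled event still runs in the same tick, but only after the crossbar has unwound its current call, which is exactly the ordering the commit message asks for.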
@@ -836,25 +834,22 @@ TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
 bool
 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
 {
-    // delay processing of returned data until next CPU clock edge
-    Tick next_tick = cpu->clockEdge();
+    DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());
 
-    if (next_tick == curTick()) {
-        cpu->completeDataAccess(pkt);
+    // The timing CPU is not really ticked, instead it relies on the
+    // memory system (fetch and load/store) to set the pace.
+    if (!tickEvent.scheduled()) {
+        // Delay processing of returned data until next CPU clock edge
+        tickEvent.schedule(pkt, cpu->clockEdge());
+        return true;
     } else {
-        if (!tickEvent.scheduled()) {
-            tickEvent.schedule(pkt, next_tick);
-        } else {
-            // In the case of a split transaction and a cache that is
-            // faster than a CPU we could get two responses before
-            // next_tick expires
-            if (!retryEvent.scheduled())
-                cpu->schedule(retryEvent, next_tick);
-            return false;
-        }
+        // In the case of a split transaction and a cache that is
+        // faster than a CPU we could get two responses in the
+        // same tick, delay the second one
+        if (!retryEvent.scheduled())
+            cpu->schedule(retryEvent, cpu->clockEdge(Cycles(1)));
+        return false;
     }
-
-    return true;
 }
 
 void
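On the data side two responses can legitimately arrive in the same tick (a split transaction plus a cache faster than the CPU). The first is buffered in the tickEvent; the second is refused by returning false, and the retryEvent scheduled one cycle later tells the sender to resend. A toy sketch of that nack-and-retry flow control, using made-up Sender/Receiver classes rather than the gem5 port API:

    // Toy model of the nack-and-retry handshake (not gem5 code): a
    // receiver that can buffer one response per cycle refuses a second
    // one and later asks the sender to resend it.
    #include <iostream>
    #include <optional>

    struct Receiver {
        std::optional<int> pending;      // at most one response per cycle
        bool recvTimingResp(int id) {
            if (pending) {
                std::cout << "nack response " << id << ", retry later\n";
                return false;            // sender must hold the packet
            }
            pending = id;
            std::cout << "accept response " << id << "\n";
            return true;
        }
        void tick() { pending.reset(); } // response sunk at the clock edge
    };

    struct Sender {
        std::optional<int> held;         // toy: at most one refused packet
        void send(Receiver &rx, int id) {
            if (!rx.recvTimingResp(id))
                held = id;
        }
        void recvRetry(Receiver &rx) {   // receiver is ready again
            if (held && rx.recvTimingResp(*held))
                held.reset();
        }
    };

    int main() {
        Receiver rx;
        Sender tx;
        tx.send(rx, 1);   // accepted, buffered for this edge
        tx.send(rx, 2);   // same tick: refused, sender holds it
        rx.tick();        // first response processed at the edge
        tx.recvRetry(rx); // stands in for retryEvent firing a cycle later
    }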