author     Dam Sunwoo <dam.sunwoo@arm.com>    2013-04-22 13:20:31 -0400
committer  Dam Sunwoo <dam.sunwoo@arm.com>    2013-04-22 13:20:31 -0400
commit     e8381142b061fbdf2f22d958f1c7559e9ffb3bd8 (patch)
tree       452e96de7f6322d62f325992a9ce5d1d2a3c3bbb /src/mem
parent     2c1e34431326381833de289b1d90f2427ba16c98 (diff)
sim: separate nextCycle() and clockEdge() in clockedObjects
Previously, nextCycle() could return the *current* cycle if the current tick was already aligned with a clock edge. This behavior is not only confusing (not quite what the function name implies), but it also caused problems in the drainResume() function. When exiting and re-entering the sim loop (e.g., to take checkpoints), the CPUs drain and resume. Due to the previous behavior of nextCycle(), the CPU tick events were rescheduled at ticks that had already been processed before draining. This caused divergence from runs that did not exit and re-enter the sim loop: initially only a one-cycle difference, but one with a significant impact later on.

This patch separates the two behaviors into nextCycle() and clockEdge(), uses nextCycle() in drainResume(), and uses clockEdge() everywhere else. Nothing (other than the name) should change, except for the drainResume() timing.
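As an illustration of the two behaviors, consider the following sketch. It is a minimal stand-alone example modeled loosely on gem5's ClockedObject, not the actual implementation; the ClockedSketch type, the mutable now variable, and the 500-tick period are invented for the example.

#include <cstdint>
#include <iostream>

using Tick = std::uint64_t;

// Stand-in for the simulator's global time source (curTick() in gem5).
static Tick now = 0;
static Tick curTick() { return now; }

struct ClockedSketch {
    Tick period;  // clock period in ticks

    // Align curTick() up to the nearest clock edge. If curTick() already
    // sits on an edge, this returns curTick() itself, i.e. the *current*
    // cycle -- exactly the old, confusing nextCycle() behavior.
    Tick clockEdge() const {
        Tick t = curTick();
        Tick rem = t % period;
        return rem == 0 ? t : t + (period - rem);
    }

    // The first clock edge at least one full cycle ahead. This can never
    // equal curTick(), so an event rescheduled here during drainResume()
    // cannot land in a tick that was already processed before draining.
    Tick nextCycle() const { return clockEdge() + period; }
};

int main() {
    ClockedSketch obj{500};  // hypothetical 500-tick clock period

    now = 1000;                             // exactly on a clock edge
    std::cout << obj.clockEdge() << '\n';   // 1000: the current edge
    std::cout << obj.nextCycle() << '\n';   // 1500: strictly in the future

    now = 1200;                             // mid-cycle
    std::cout << obj.clockEdge() << '\n';   // 1500
    std::cout << obj.nextCycle() << '\n';   // 2000
    return 0;
}

With this split, the bridge, bus, and Ruby memory controller below keep the old "current edge if already aligned" scheduling behavior by switching to clockEdge(), while drainResume() gets the strictly-in-the-future semantics from nextCycle().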
Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/bridge.cc                          4
-rw-r--r--  src/mem/bus.cc                             2
-rw-r--r--  src/mem/ruby/system/RubyMemoryControl.cc   2
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 1a8437aa1..91bef2757 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -277,7 +277,7 @@ Bridge::BridgeMasterPort::trySendTiming()
         req = transmitList.front();
         DPRINTF(Bridge, "Scheduling next send\n");
         bridge.schedule(sendEvent, std::max(req.tick,
-                                            bridge.nextCycle()));
+                                            bridge.clockEdge()));
     }

     // if we have stalled a request due to a full request queue,
@@ -318,7 +318,7 @@ Bridge::BridgeSlavePort::trySendTiming()
         resp = transmitList.front();
         DPRINTF(Bridge, "Scheduling next send\n");
         bridge.schedule(sendEvent, std::max(resp.tick,
-                                            bridge.nextCycle()));
+                                            bridge.clockEdge()));
     }

     // if there is space in the request queue and we were stalling
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index 368d49c86..8546df565 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -135,7 +135,7 @@ BaseBus::calcPacketTiming(PacketPtr pkt)
     // the bus will be called at a time that is not necessarily
     // coinciding with its own clock, so start by determining how long
     // until the next clock edge (could be zero)
-    Tick offset = nextCycle() - curTick();
+    Tick offset = clockEdge() - curTick();

     // determine how many cycles are needed to send the data
     unsigned dataCycles = pkt->hasData() ? divCeil(pkt->getSize(), width) : 0;
diff --git a/src/mem/ruby/system/RubyMemoryControl.cc b/src/mem/ruby/system/RubyMemoryControl.cc
index 75e6e1b06..5ffc60e2b 100644
--- a/src/mem/ruby/system/RubyMemoryControl.cc
+++ b/src/mem/ruby/system/RubyMemoryControl.cc
@@ -307,7 +307,7 @@ RubyMemoryControl::enqueueMemRef(MemoryNode& memRef)
     m_input_queue.push_back(memRef);

     if (!m_event.scheduled()) {
-        schedule(m_event, nextCycle());
+        schedule(m_event, clockEdge());
     }
 }