summaryrefslogtreecommitdiff
path: root/src/cpu/testers/traffic_gen/traffic_gen.cc
diff options
context:
space:
mode:
authorAndreas Hansson <andreas.hansson@arm.com>2013-05-30 12:54:06 -0400
committerAndreas Hansson <andreas.hansson@arm.com>2013-05-30 12:54:06 -0400
commitfc09bc8678b5e78d553e009105c58e5c5d5befb4 (patch)
tree6d5ff719387cd2e24d549f575576589dbfd98f6e /src/cpu/testers/traffic_gen/traffic_gen.cc
parent4931414ca79b97ee64a958c4dd4ed1cafc44b4bd (diff)
downloadgem5-fc09bc8678b5e78d553e009105c58e5c5d5befb4.tar.xz
cpu: Add request elasticity to the traffic generator
This patch adds an optional request elasticity to the traffic generator, effectively compensating for it in the case of the linear and random generators, and adding it in the case of the trace generator. The accounting is left with the top-level traffic generator, and the individual generators do the necessary math as part of determining the next packet tick. Note that in the linear and random generators we have to compensate for the blocked time to not be elastic, i.e. without this patch the aforementioned generators will slow down in the case of back-pressure.
Diffstat (limited to 'src/cpu/testers/traffic_gen/traffic_gen.cc')
-rw-r--r--src/cpu/testers/traffic_gen/traffic_gen.cc13
1 files changed, 8 insertions, 5 deletions
diff --git a/src/cpu/testers/traffic_gen/traffic_gen.cc b/src/cpu/testers/traffic_gen/traffic_gen.cc
index 8916dcb8d..f5835f8f4 100644
--- a/src/cpu/testers/traffic_gen/traffic_gen.cc
+++ b/src/cpu/testers/traffic_gen/traffic_gen.cc
@@ -55,6 +55,7 @@ TrafficGen::TrafficGen(const TrafficGenParams* p)
system(p->system),
masterID(system->getMasterId(name())),
configFile(p->config_file),
+ elasticReq(p->elastic_req),
nextTransitionTick(0),
nextPacketTick(0),
port(name() + ".port", *this),
@@ -107,7 +108,7 @@ TrafficGen::initState()
// when not restoring from a checkpoint, make sure we kick things off
if (system->isTimingMode()) {
// call nextPacketTick on the state to advance it
- nextPacketTick = states[currState]->nextPacketTick();
+ nextPacketTick = states[currState]->nextPacketTick(elasticReq, 0);
schedule(updateEvent, std::min(nextPacketTick, nextTransitionTick));
} else {
DPRINTF(TrafficGen,
@@ -165,7 +166,7 @@ TrafficGen::unserialize(Checkpoint* cp, const string& section)
// @todo In the case of a stateful generator state such as the
// trace player we would also have to restore the position in the
- // trace playback
+ // trace playback and the tick offset
UNSERIALIZE_SCALAR(currState);
}
@@ -193,7 +194,7 @@ TrafficGen::update()
if (retryPkt == NULL) {
// schedule next update event based on either the next execute
// tick or the next transition, which ever comes first
- nextPacketTick = states[currState]->nextPacketTick();
+ nextPacketTick = states[currState]->nextPacketTick(elasticReq, 0);
Tick nextEventTick = std::min(nextPacketTick, nextTransitionTick);
DPRINTF(TrafficGen, "Next event scheduled at %lld\n", nextEventTick);
schedule(updateEvent, nextEventTick);
@@ -386,14 +387,16 @@ TrafficGen::recvRetry()
if (port.sendTimingReq(retryPkt)) {
retryPkt = NULL;
// remember how much delay was incurred due to back-pressure
- // when sending the request
+ // when sending the request, we also use this to derive
+ // the tick for the next packet
Tick delay = curTick() - retryPktTick;
retryPktTick = 0;
retryTicks += delay;
if (drainManager == NULL) {
// packet is sent, so find out when the next one is due
- nextPacketTick = states[currState]->nextPacketTick();
+ nextPacketTick = states[currState]->nextPacketTick(elasticReq,
+ delay);
Tick nextEventTick = std::min(nextPacketTick, nextTransitionTick);
schedule(updateEvent, std::max(curTick(), nextEventTick));
} else {