author     Andreas Hansson <andreas.hansson@arm.com>  2013-03-26 14:46:46 -0400
committer  Andreas Hansson <andreas.hansson@arm.com>  2013-03-26 14:46:46 -0400
commit     362f6f1a16a68a99c962628bcda00c7c576f935c (patch)
tree       a3df8a6d05340a65d190fcf166091b3f1aed4ae6 /src/mem/bus.cc
parent     2123176684e3967912126125c65319dffdfa7467 (diff)
download   gem5-362f6f1a16a68a99c962628bcda00c7c576f935c.tar.xz
mem: Introduce a variable for the retrying port
This patch introduces a variable to keep track of the retrying port instead of relying on it being the front of the retryList. Besides the improvement in readability, this patch is a step towards separating out the two cases where a port is waiting for the bus to be free, and where the forwarding did not succeed and the bus is waiting for a retry to pass on to the original initiator of the transaction.

The changes made are currently such that the regressions are not affected. This is ensured by always prioritizing the currently retrying port and putting it back at the front of the retry list.
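To make the bookkeeping concrete, below is a minimal, self-contained C++ sketch of the scheme the commit message describes: an explicit retrying-port pointer records which port is currently being retried, while the retry list only holds ports that are still waiting. The SketchPort/SketchLayer names and the simplified releaseLayer() are illustrative assumptions, not the actual BaseBus::Layer implementation shown in the diff below; timing, draining and snoop handling are omitted.

// A minimal, self-contained sketch (not the actual BaseBus::Layer code) of
// the bookkeeping introduced by this patch: an explicit retryingPort pointer
// instead of "the retrying port is the front of retryList". The names
// SketchPort/SketchLayer and the simplified releaseLayer() are illustrative
// assumptions only.
#include <cassert>
#include <deque>
#include <string>

struct SketchPort {
    std::string name;   // identifies the port in the walk-through at the end
};

class SketchLayer {
  public:
    enum State { IDLE, BUSY, RETRY };

    bool tryTiming(SketchPort* port) {
        // busy, or retrying with some other port: queue the requester
        if (state == BUSY || (state == RETRY && port != retryingPort)) {
            retryList.push_back(port);
            return false;
        }
        // shared state update: idle becomes busy, a retry stays a retry
        if (state == IDLE)
            state = BUSY;
        return true;
    }

    void succeededTiming() {
        // the retrying port got through: clear the explicit pointer
        if (state == RETRY) {
            state = BUSY;
            retryingPort = nullptr;
        }
    }

    void failedTiming(SketchPort* port) {
        if (state == RETRY) {
            // only the currently retrying port can fail here; put it back
            // at the front so it keeps its priority, as the patch does
            assert(port == retryingPort);
            retryList.push_front(port);
            retryingPort = nullptr;
        } else {
            retryList.push_back(port);
        }
        state = BUSY;
    }

    void retryWaiting() {
        // we always go to retrying from idle
        assert(!retryList.empty());
        assert(state == IDLE);
        assert(retryingPort == nullptr);
        state = RETRY;
        // remember who is retrying instead of peeking at the list front
        retryingPort = retryList.front();
        retryList.pop_front();
        // ... retryingPort->sendRetry() would be issued here ...
    }

    void releaseLayer() {
        // the layer frees up; wake the next waiting port, if any
        assert(state == BUSY);
        state = IDLE;
        if (!retryList.empty())
            retryWaiting();
    }

  private:
    State state = IDLE;
    SketchPort* retryingPort = nullptr;  // the variable this patch adds
    std::deque<SketchPort*> retryList;   // ports waiting for the layer
};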
Diffstat (limited to 'src/mem/bus.cc')
-rw-r--r--  src/mem/bus.cc  50
1 files changed, 33 insertions, 17 deletions
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index d29422593..88b91983b 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -160,7 +160,7 @@ template <typename PortClass>
BaseBus::Layer<PortClass>::Layer(BaseBus& _bus, const std::string& _name) :
Drainable(),
bus(_bus), _name(_name), state(IDLE), drainManager(NULL),
- releaseEvent(this)
+ retryingPort(NULL), releaseEvent(this)
{
}
@@ -192,12 +192,17 @@ BaseBus::Layer<PortClass>::tryTiming(PortClass* port)
{
// first we see if the bus is busy, next we check if we are in a
// retry with a port other than the current one
- if (state == BUSY || (state == RETRY && port != retryList.front())) {
+ if (state == BUSY || (state == RETRY && port != retryingPort)) {
// put the port at the end of the retry list
retryList.push_back(port);
return false;
}
+ // @todo: here we should no longer consider this port retrying
+ // once we can differentiate retries due to a busy bus and a
+ // failed forwarding, for now keep it so we can stick it back at
+ // the front of the retry list if needed
+
// update the state which is shared for request, response and
// snoop responses, if we were idle we are now busy, if we are in
// a retry, then do not change
@@ -211,12 +216,13 @@ template <typename PortClass>
void
BaseBus::Layer<PortClass>::succeededTiming(Tick busy_time)
{
- // if a retrying port succeeded, also take it off the retry list
+ // if a retrying port succeeded, update the state and reset the
+ // retrying port
if (state == RETRY) {
- DPRINTF(BaseBus, "Remove retry from list %s\n",
- retryList.front()->name());
- retryList.pop_front();
+ DPRINTF(BaseBus, "Succeeded retry from %s\n",
+ retryingPort->name());
state = BUSY;
+ retryingPort = NULL;
}
// we should either have gone from idle to busy in the
@@ -231,10 +237,17 @@ template <typename PortClass>
void
BaseBus::Layer<PortClass>::failedTiming(PortClass* port, Tick busy_time)
{
- // if we are not in a retry, i.e. busy (but never idle), or we are
- // in a retry but not for the current port, then add the port at
- // the end of the retry list
- if (state != RETRY || port != retryList.front()) {
+ // if the current failing port is the retrying one, then for now stick it
+ // back at the front of the retry list to not change any regressions
+ if (state == RETRY) {
+ // we should never see a retry from any port but the current
+ // retry port at this point
+ assert(port == retryingPort);
+ retryList.push_front(port);
+ retryingPort = NULL;
+ } else {
+ // if we are not in a retry, i.e. busy (but never idle), then
+ // add the port at the end of the retry list
retryList.push_back(port);
}
@@ -283,25 +296,28 @@ BaseBus::Layer<PortClass>::retryWaiting()
// we always go to retrying from idle
assert(state == IDLE);
- // update the state which is shared for request, response and
- // snoop responses
+ // update the state
state = RETRY;
+ // set the retrying port to the front of the retry list and pop it
+ // off the list
+ assert(retryingPort == NULL);
+ retryingPort = retryList.front();
+ retryList.pop_front();
+
// note that we might have blocked on the receiving port being
// busy (rather than the bus itself) and now call retry before the
// destination called retry on the bus
- retryList.front()->sendRetry();
+ retryingPort->sendRetry();
// If the bus is still in the retry state, sendTiming wasn't
// called in zero time (e.g. the cache does this)
if (state == RETRY) {
- retryList.pop_front();
-
//Burn a cycle for the missed grant.
- // update the state which is shared for request, response and
- // snoop responses
+ // update the state to busy and reset the retrying port
state = BUSY;
+ retryingPort = NULL;
// occupy the bus layer until the next cycle ends
occupyLayer(bus.clockEdge(Cycles(1)));
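A hypothetical walk-through against the sketch above (assuming the SketchLayer/SketchPort definitions are in scope) shows why a failed retrier goes back to the front of the list: it keeps its priority and is the first port woken when the layer frees up again, which is how the patch keeps the regressions unchanged.

// Hypothetical usage of the SketchLayer/SketchPort sketch above; not part
// of gem5 itself.
int main() {
    SketchLayer layer;
    SketchPort a{"port_a"};
    SketchPort b{"port_b"};

    layer.tryTiming(&a);      // layer was idle: a gets through, layer goes BUSY
    layer.tryTiming(&b);      // layer is busy: returns false, b joins the retry list

    layer.releaseLayer();     // layer frees up: b becomes the retrying port
    layer.failedTiming(&b);   // b's retry fails: b goes back to the list front

    layer.releaseLayer();     // b is retried again before any newcomer
    layer.succeededTiming();  // this time it succeeds; retryingPort is cleared
    return 0;
}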