summary | refs | log | tree | commit | diff
path: root/src
diff options
context:
space:
mode:
author: Andreas Hansson <andreas.hansson@arm.com>  2013-03-26 14:46:46 -0400
committer: Andreas Hansson <andreas.hansson@arm.com>  2013-03-26 14:46:46 -0400
commit: 362f6f1a16a68a99c962628bcda00c7c576f935c (patch)
tree: a3df8a6d05340a65d190fcf166091b3f1aed4ae6 /src
parent: 2123176684e3967912126125c65319dffdfa7467 (diff)
download: gem5-362f6f1a16a68a99c962628bcda00c7c576f935c.tar.xz
mem: Introduce a variable for the retrying port
This patch introduces a variable to keep track of the retrying port instead of relying on it being the front of the retryList. Besides the improvement in readability, this patch is a step towards separating out the two cases where a port is waiting for the bus to be free, and where the forwarding did not succeed and the bus is waiting for a retry to pass on to the original initiator of the transaction. The changes made are currently such that the regressions are not affected. This is ensured by always prioritizing the currently retrying port and putting it back at the front of the retry list.
Diffstat (limited to 'src')
-rw-r--r--  src/mem/bus.cc | 50
-rw-r--r--  src/mem/bus.hh |  8
2 files changed, 41 insertions(+), 17 deletions(-)
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index d29422593..88b91983b 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -160,7 +160,7 @@ template <typename PortClass>
BaseBus::Layer<PortClass>::Layer(BaseBus& _bus, const std::string& _name) :
Drainable(),
bus(_bus), _name(_name), state(IDLE), drainManager(NULL),
- releaseEvent(this)
+ retryingPort(NULL), releaseEvent(this)
{
}
@@ -192,12 +192,17 @@ BaseBus::Layer<PortClass>::tryTiming(PortClass* port)
{
// first we see if the bus is busy, next we check if we are in a
// retry with a port other than the current one
- if (state == BUSY || (state == RETRY && port != retryList.front())) {
+ if (state == BUSY || (state == RETRY && port != retryingPort)) {
// put the port at the end of the retry list
retryList.push_back(port);
return false;
}
+ // @todo: here we should no longer consider this port retrying
+ // once we can differentiate retries due to a busy bus and a
+ // failed forwarding, for now keep it so we can stick it back at
+ // the front of the retry list if needed
+
// update the state which is shared for request, response and
// snoop responses, if we were idle we are now busy, if we are in
// a retry, then do not change
@@ -211,12 +216,13 @@ template <typename PortClass>
void
BaseBus::Layer<PortClass>::succeededTiming(Tick busy_time)
{
- // if a retrying port succeeded, also take it off the retry list
+ // if a retrying port succeeded, update the state and reset the
+ // retrying port
if (state == RETRY) {
- DPRINTF(BaseBus, "Remove retry from list %s\n",
- retryList.front()->name());
- retryList.pop_front();
+ DPRINTF(BaseBus, "Succeeded retry from %s\n",
+ retryingPort->name());
state = BUSY;
+ retryingPort = NULL;
}
// we should either have gone from idle to busy in the
@@ -231,10 +237,17 @@ template <typename PortClass>
void
BaseBus::Layer<PortClass>::failedTiming(PortClass* port, Tick busy_time)
{
- // if we are not in a retry, i.e. busy (but never idle), or we are
- // in a retry but not for the current port, then add the port at
- // the end of the retry list
- if (state != RETRY || port != retryList.front()) {
+ // if the current failing port is the retrying one, then for now stick it
+ // back at the front of the retry list to not change any regressions
+ if (state == RETRY) {
+ // we should never see a retry from any port but the current
+ // retry port at this point
+ assert(port == retryingPort);
+ retryList.push_front(port);
+ retryingPort = NULL;
+ } else {
+ // if we are not in a retry, i.e. busy (but never idle), then
+ // add the port at the end of the retry list
retryList.push_back(port);
}
@@ -283,25 +296,28 @@ BaseBus::Layer<PortClass>::retryWaiting()
// we always go to retrying from idle
assert(state == IDLE);
- // update the state which is shared for request, response and
- // snoop responses
+ // update the state
state = RETRY;
+ // set the retrying port to the front of the retry list and pop it
+ // off the list
+ assert(retryingPort == NULL);
+ retryingPort = retryList.front();
+ retryList.pop_front();
+
// note that we might have blocked on the receiving port being
// busy (rather than the bus itself) and now call retry before the
// destination called retry on the bus
- retryList.front()->sendRetry();
+ retryingPort->sendRetry();
// If the bus is still in the retry state, sendTiming wasn't
// called in zero time (e.g. the cache does this)
if (state == RETRY) {
- retryList.pop_front();
-
//Burn a cycle for the missed grant.
- // update the state which is shared for request, response and
- // snoop responses
+ // update the state to busy and reset the retrying port
state = BUSY;
+ retryingPort = NULL;
// occupy the bus layer until the next cycle ends
occupyLayer(bus.clockEdge(Cycles(1)));
diff --git a/src/mem/bus.hh b/src/mem/bus.hh
index 705a3a999..2cd21ff85 100644
--- a/src/mem/bus.hh
+++ b/src/mem/bus.hh
@@ -212,6 +212,14 @@ class BaseBus : public MemObject
std::deque<PortClass*> retryList;
/**
+ * Port that we are currently in the process of telling to
+ * retry a previously failed attempt to perform a timing
+ * transaction. This is a valid port when in the retry state,
+ * and NULL when in busy or idle.
+ */
+ PortClass* retryingPort;
+
+ /**
* Release the bus layer after being occupied and return to an
* idle state where we proceed to send a retry to any
* potential waiting port, or drain if asked to do so.