author     Andreas Hansson <andreas.hansson@arm.com>    2014-09-03 07:42:50 -0400
committer  Andreas Hansson <andreas.hansson@arm.com>    2014-09-03 07:42:50 -0400
commit     3be4f4b846f991c98fe1909631996c5b58d52437 (patch)
tree       d9e603e739e999def333ae04c2386af0c1dd1fdb
parent     5d029463eef3cfafa507dacadccd33402f82029a (diff)
download   gem5-3be4f4b846f991c98fe1909631996c5b58d52437.tar.xz
mem: Fix a bug in the cache port flow control
This patch fixes a bug in the cache port where the retry flag was reset too early, allowing new requests to arrive before the retry was actually sent, but with the event already scheduled. This caused a deadlock in the interactions with the O3 LSQ.

The patch fixes the underlying issue by shifting the resetting of the flag to be done by the event that also calls sendRetry(). The patch also tidies up the flow control in recvTimingReq and ensures that we also check if we already have a retry outstanding.
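As an aside, a minimal sketch of the pre-patch ordering (simplified from the removed lines in base.cc below; the DPRINTF and @TODO comment are omitted) shows why the reset was too early: mustSendRetry is cleared when the retry event is scheduled, one tick before sendRetry() actually fires, so new requests can arrive before the retry has gone out even though the event is already scheduled, which is the window that led to the deadlock with the O3 LSQ.

    // Simplified pre-patch clearBlocked(): the flag is reset at schedule
    // time rather than when the retry event actually fires.
    void
    BaseCache::CacheSlavePort::clearBlocked()
    {
        blocked = false;
        if (mustSendRetry) {
            mustSendRetry = false;                         // cleared too early
            owner.schedule(sendRetryEvent, curTick() + 1); // retry only sent next tick
        }
    }

With the patch, the reset moves into processSendRetry(), i.e. to the point where sendRetry() is actually issued, and recvTimingReq() additionally rejects new requests while a retry is still outstanding (blocked || mustSendRetry).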
-rw-r--r--  src/mem/cache/base.cc        11
-rw-r--r--  src/mem/cache/base.hh         5
-rw-r--r--  src/mem/cache/cache_impl.hh  27
3 files changed, 32 insertions, 11 deletions
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index dd1306270..2a285bf2f 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -106,13 +106,20 @@ BaseCache::CacheSlavePort::clearBlocked()
     DPRINTF(CachePort, "Cache port %s accepting new requests\n", name());
     blocked = false;
     if (mustSendRetry) {
-        DPRINTF(CachePort, "Cache port %s sending retry\n", name());
-        mustSendRetry = false;
         // @TODO: need to find a better time (next bus cycle?)
         owner.schedule(sendRetryEvent, curTick() + 1);
     }
 }
 
+void
+BaseCache::CacheSlavePort::processSendRetry()
+{
+    DPRINTF(CachePort, "Cache port %s sending retry\n", name());
+
+    // reset the flag and call retry
+    mustSendRetry = false;
+    sendRetry();
+}
 
 void
 BaseCache::init()
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index c1c77cde9..1567aaa62 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -182,7 +182,10 @@ class BaseCache : public MemObject
       private:
 
-        EventWrapper<SlavePort, &SlavePort::sendRetry> sendRetryEvent;
+        void processSendRetry();
+
+        EventWrapper<CacheSlavePort,
+                     &CacheSlavePort::processSendRetry> sendRetryEvent;
 
     };
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 91cb5a4e3..1a72f285f 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1937,16 +1937,27 @@ template<class TagStore>
 bool
 Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
 {
-    // always let inhibited requests through even if blocked
-    if (!pkt->memInhibitAsserted() && blocked) {
-        assert(!cache->system->bypassCaches());
-        DPRINTF(Cache,"Scheduling a retry while blocked\n");
-        mustSendRetry = true;
-        return false;
+    assert(!cache->system->bypassCaches());
+
+    bool success = false;
+
+    // always let inhibited requests through, even if blocked
+    if (pkt->memInhibitAsserted()) {
+        // this should always succeed
+        success = cache->recvTimingReq(pkt);
+        assert(success);
+    } else if (blocked || mustSendRetry) {
+        // either already committed to send a retry, or blocked
+        success = false;
+    } else {
+        // for now this should always succeed
+        success = cache->recvTimingReq(pkt);
+        assert(success);
     }
-    cache->recvTimingReq(pkt);
-    return true;
+    // remember if we have to retry
+    mustSendRetry = !success;
+    return success;
 }
 
 template<class TagStore>