author    Andreas Hansson <andreas.hansson@arm.com>  2015-03-27 04:56:00 -0400
committer Andreas Hansson <andreas.hansson@arm.com>  2015-03-27 04:56:00 -0400
commit    a7a1e6004a0d2508913277b5c60d245fdcad2681 (patch)
tree      fa367fcbea69318b4cf16e40d1799f5a08365eff /src/mem/cache
parent    801ce65eaeda04017ac0df544eaa4c8ffae98455 (diff)
download  gem5-a7a1e6004a0d2508913277b5c60d245fdcad2681.tar.xz
mem: Ignore uncacheable MSHRs when finding matches
This patch changes how we search for matching MSHRs, ignoring any MSHR that is allocated for an uncacheable access. By doing so, this patch fixes a corner case in the MSHRs where incorrect data ended up being copied into a (cacheable) read packet due to a first uncacheable MSHR target of size 4, followed by a cacheable target to the same MSHR of size 64. The latter target was filled with nonsense data.
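Below is a minimal, self-contained C++ sketch (not gem5 code; the Mshr/MshrQueue names and fields are illustrative only) of the matching rule this patch introduces: MSHRs allocated for uncacheable accesses are skipped when searching for an existing entry, so a later cacheable access to the same block allocates its own MSHR instead of being appended as a target of the in-flight uncacheable one.

#include <cstdint>
#include <iostream>
#include <list>

struct Mshr {
    uint64_t blkAddr;      // block address this MSHR tracks
    bool     isSecure;     // secure/non-secure address space
    bool     uncacheable;  // allocated for an uncacheable access?
};

class MshrQueue {
  public:
    Mshr *allocate(uint64_t blk_addr, bool is_secure, bool uncacheable) {
        allocatedList.push_back({blk_addr, is_secure, uncacheable});
        return &allocatedList.back();
    }

    // Return a matching MSHR, ignoring any entry that serves an
    // uncacheable access: cacheable accesses must never coalesce with it.
    Mshr *findMatch(uint64_t blk_addr, bool is_secure) {
        for (auto &mshr : allocatedList) {
            if (!mshr.uncacheable && mshr.blkAddr == blk_addr &&
                mshr.isSecure == is_secure) {
                return &mshr;
            }
        }
        return nullptr;
    }

  private:
    std::list<Mshr> allocatedList;  // std::list keeps pointers stable
};

int main() {
    MshrQueue q;
    // First miss: an uncacheable 4-byte access allocates an MSHR.
    q.allocate(0x1000, false, /*uncacheable=*/true);
    // Second miss: a cacheable 64-byte access to the same block. Because
    // the uncacheable entry is skipped, no match is found and the
    // cacheable access gets its own MSHR.
    if (q.findMatch(0x1000, false) == nullptr) {
        q.allocate(0x1000, false, /*uncacheable=*/false);
        std::cout << "cacheable access allocated its own MSHR\n";
    }
}

With the pre-patch behaviour, findMatch would have returned the uncacheable MSHR and the 64-byte cacheable request would have been added as a target of it, which is exactly the corner case described above.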
Diffstat (limited to 'src/mem/cache')
-rw-r--r--  src/mem/cache/mshr.cc        14
-rw-r--r--  src/mem/cache/mshr_queue.cc  11
2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 79cf7a998..0915df23f 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -273,6 +273,15 @@ MSHR::deallocate()
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
+ // assume we'd never issue a prefetch when we've got an
+ // outstanding miss
+ assert(pkt->cmd != MemCmd::HardPFReq);
+
+ // uncacheable accesses always allocate a new MSHR, and cacheable
+ // accesses ignore any uncacheable MSHRs, thus we should never
+    // have targets added if originally allocated uncacheable
+ assert(!_isUncacheable);
+
// if there's a request already in service for this MSHR, we will
// have to defer the new target until after the response if any of
// the following are true:
@@ -283,11 +292,6 @@ MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
// getting an exclusive block back or we have already snooped
// another read request that will downgrade our exclusive block
// to shared
-
- // assume we'd never issue a prefetch when we've got an
- // outstanding miss
- assert(pkt->cmd != MemCmd::HardPFReq);
-
if (inService &&
(!deferredTargets.empty() || hasPostInvalidate() ||
(pkt->needsExclusive() &&
diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index 2b72cf339..f8587e1f1 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -69,7 +69,13 @@ MSHR *
MSHRQueue::findMatch(Addr blk_addr, bool is_secure) const
{
for (const auto& mshr : allocatedList) {
- if (mshr->blkAddr == blk_addr && mshr->isSecure == is_secure) {
+        // we skip any MSHRs allocated for uncacheable accesses when
+        // looking for a match; the cache never checks for matches when
+        // adding new uncacheable entries, and we do not want normal
+        // cacheable accesses being added to an MSHR serving an
+        // uncacheable access
+ if (!mshr->isUncacheable() && mshr->blkAddr == blk_addr &&
+ mshr->isSecure == is_secure) {
return mshr;
}
}
@@ -84,7 +90,8 @@ MSHRQueue::findMatches(Addr blk_addr, bool is_secure,
assert(matches.empty());
bool retval = false;
for (const auto& mshr : allocatedList) {
- if (mshr->blkAddr == blk_addr && mshr->isSecure == is_secure) {
+ if (!mshr->isUncacheable() && mshr->blkAddr == blk_addr &&
+ mshr->isSecure == is_secure) {
retval = true;
matches.push_back(mshr);
}