path: root/src/mem/cache/mshr.cc
author     Curtis Dunham <Curtis.Dunham@arm.com>   2014-10-21 17:04:41 -0500
committer  Curtis Dunham <Curtis.Dunham@arm.com>   2014-10-21 17:04:41 -0500
commit     4024fab7fc16223018a48af4a19efeec865c889b (patch)
tree       8a2285b72370de07b44e7b452f30ace2c19a32b2 /src/mem/cache/mshr.cc
parent     46f9f11a5575a0ec8a69a1c0181b37668de65152 (diff)
download   gem5-4024fab7fc16223018a48af4a19efeec865c889b.tar.xz
mem: don't inhibit WriteInv's or defer snoops on their MSHRs
WriteInvalidate semantics depend on the writeback being unconditional; without it they won't complete. There is also no point in deferring snoops on their MSHRs, since they do not receive new data at the end of their life cycle the way other transactions do. Add a comment in the cache about a minor inefficiency regarding WriteInvalidate.
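
To make the reasoning above concrete, the following is a minimal sketch of the three decisions the patch changes, not the gem5 sources: the MshrRules type, its member functions and their parameters are hypothetical names introduced here for illustration, while pendingDirty/pendingClean mirror the flags visible in the diff below.

    // Hedged sketch only. "pendingClean" marks an MSHR allocated for a
    // WriteInvalidate: its data is complete at allocation time, so the
    // block will be clean once the MSHR finishes.
    struct MshrRules {
        bool needsExclusive;   // the MSHR's targets want a writable copy
        bool pendingClean;     // the MSHR carries a WriteInvalidate

        // markInService: a WriteInvalidate MSHR never ends up holding dirty
        // data, so needing exclusive access alone must not mark it dirty.
        bool pendingDirty(bool ownPktIsWriteInvalidate,
                          bool sharedAsserted, bool memInhibitAsserted) const {
            return (needsExclusive && !ownPktIsWriteInvalidate) ||
                   (!sharedAsserted && memInhibitAsserted);
        }

        // handleSnoop: never inhibit a snooping WriteInvalidate; its
        // writeback is unconditional and must be allowed to complete.
        bool inhibitSnoop(bool pendingDirtyNow,
                          bool snoopIsWriteInvalidate) const {
            return pendingDirtyNow && !snoopIsWriteInvalidate;
        }

        // handleSnoop: an exclusive snoop on a pending-clean MSHR is not
        // deferred, since no new data arrives at the end of its life cycle.
        bool deferExclusiveSnoop() const { return !pendingClean; }
    };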
Diffstat (limited to 'src/mem/cache/mshr.cc')
-rw-r--r--  src/mem/cache/mshr.cc  27
1 file changed, 21 insertions, 6 deletions
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index fcf9fc425..bc46ed267 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -62,10 +62,11 @@
using namespace std;
MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
- pendingDirty(false), postInvalidate(false), postDowngrade(false),
- _isObsolete(false), queue(NULL), order(0), addr(0), size(0),
- isSecure(false), inService(false), isForward(false),
- threadNum(InvalidThreadID), data(NULL)
+ pendingDirty(false), pendingClean(false),
+ postInvalidate(false), postDowngrade(false),
+ _isObsolete(false), queue(NULL), order(0), addr(0),
+ size(0), isSecure(false), inService(false),
+ isForward(false), threadNum(InvalidThreadID), data(NULL)
{
}
@@ -213,6 +214,7 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target, Tick whenReady,
isForward = false;
_isUncacheable = target->req->isUncacheable();
inService = false;
+ pendingClean = (target->cmd == MemCmd::WriteInvalidateReq);
downstreamPending = false;
_isObsolete = false;
threadNum = 0;
@@ -251,7 +253,8 @@ MSHR::markInService(PacketPtr pkt)
assert(pkt != NULL);
inService = true;
- pendingDirty = (targets.needsExclusive ||
+ pendingDirty = ((targets.needsExclusive &&
+ (pkt->cmd != MemCmd::WriteInvalidateReq)) ||
(!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
postInvalidate = postDowngrade = false;
@@ -367,7 +370,12 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
downstreamPending && targets.needsExclusive);
- if (isPendingDirty()) {
+ // WriteInvalidates must writeback and should not be inhibited on
+ // account of its snoops discovering MSHRs wanting exclusive access
+ // to what it wrote. We don't want to push this check higher,
+ // however, because we want to be sure to add an invalidating
+ // Target::FromSnoop, above.
+ if (isPendingDirty() && (pkt->cmd != MemCmd::WriteInvalidateReq)) {
pkt->assertMemInhibit();
pkt->setSupplyExclusive();
}
@@ -375,6 +383,13 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
if (pkt->needsExclusive()) {
// This transaction will take away our pending copy
postInvalidate = true;
+
+ // Do not defer (i.e. return true) the snoop if the block is
+ // going to be clean once the MSHR completes, as the data is
+ // ready now.
+ if (isPendingClean()) {
+ return false;
+ }
}
}
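
For reference, the effect of the last hunk can be illustrated with a small stand-alone sketch; the function name, the reduced parameter list and the printf driver are assumptions made for illustration and are not the real MSHR::handleSnoop interface.

    #include <cstdio>

    // Returns true if the exclusive snoop has to be deferred until the MSHR
    // completes; a pending-clean (WriteInvalidate) MSHR can answer right away.
    static bool deferExclusiveSnoopSketch(bool pendingClean, bool &postInvalidate)
    {
        // The snooping transaction takes away the pending copy either way.
        postInvalidate = true;

        // A WriteInvalidate MSHR will not receive new data at the end of its
        // life cycle, so there is nothing to wait for: do not defer the snoop.
        if (pendingClean)
            return false;

        // Otherwise the snoop stays recorded as a deferred target (simplified).
        return true;
    }

    int main()
    {
        bool postInvalidate = false;
        bool deferred = deferExclusiveSnoopSketch(/* pendingClean = */ true,
                                                  postInvalidate);
        std::printf("deferred=%d postInvalidate=%d\n", deferred, postInvalidate);
        return 0;
    }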