path: root/src/mem/cache/cache_impl.hh
author    Curtis Dunham <Curtis.Dunham@arm.com>    2014-10-21 17:04:41 -0500
committer Curtis Dunham <Curtis.Dunham@arm.com>    2014-10-21 17:04:41 -0500
commit    4024fab7fc16223018a48af4a19efeec865c889b (patch)
tree      8a2285b72370de07b44e7b452f30ace2c19a32b2 /src/mem/cache/cache_impl.hh
parent    46f9f11a5575a0ec8a69a1c0181b37668de65152 (diff)
download  gem5-4024fab7fc16223018a48af4a19efeec865c889b.tar.xz
mem: don't inhibit WriteInv's or defer snoops on their MSHRs
WriteInvalidate semantics depend on the unconditional writeback or they won't complete. Also, there's no point in deferring snoops on their MSHRs, as they don't get new data at the end of their life cycle the way other transactions do.

Add comment in the cache about a minor inefficiency re: WriteInvalidate.
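A rough C++ sketch of the idea, not the actual gem5 MSHR code: the names Mshr, isWriteInvalidate() and handleSnoop() below are illustrative assumptions. A snoop that hits an MSHR allocated for a WriteInvalidate is neither deferred nor allowed to inhibit the writeback, because the transaction already holds its final data and must write it back unconditionally.

    struct Packet;  // stand-in for gem5's packet type

    struct Mshr {
        bool writeInvalidate = false;  // allocated for a WriteInvalidateReq

        bool isWriteInvalidate() const { return writeInvalidate; }

        // Decide whether an incoming snoop has to wait on this MSHR.
        // Returns true if the snoop was deferred.
        bool handleSnoop(Packet * /* snoopPkt */) {
            if (isWriteInvalidate()) {
                // No new data ever arrives at the end of this MSHR's life
                // cycle, so there is nothing for the snoop to wait for:
                // don't defer it, and don't suppress (inhibit) the
                // unconditional writeback the transaction depends on.
                return false;
            }
            // Other transaction types may defer the snoop until their
            // response data arrives.
            return true;
        }
    };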
Diffstat (limited to 'src/mem/cache/cache_impl.hh')
-rw-r--r--  src/mem/cache/cache_impl.hh | 10
1 file changed, 7 insertions, 3 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 4b51a6359..66abf6eff 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -345,7 +345,6 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
blk->status |= BlkSecure;
}
}
- std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
if (pkt->cmd == MemCmd::Writeback) {
blk->status |= BlkDirty;
if (pkt->isSupplyExclusive()) {
@@ -354,11 +353,11 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
// nothing else to do; writeback doesn't expect response
assert(!pkt->needsResponse());
} else if (pkt->cmd == MemCmd::WriteInvalidateReq) {
- assert(blk->isReadable()); // implicitly checks for Valid bit also
- blk->status |= (BlkDirty | BlkCanGoExclusive);
+ blk->status |= (BlkReadable | BlkDirty | BlkCanGoExclusive);
blk->status &= ~BlkWritable;
++fastWrites;
}
+ std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
incHitCount(pkt);
return true;
@@ -1517,6 +1516,11 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
// there are cases (such as failed store conditionals or
// compare-and-swaps) where we'll demand an exclusive copy but
// end up not writing it.
+ // Caveat: if a Read takes a value from a WriteInvalidate MSHR,
+ // it will get marked Dirty even though it is Clean (once the
+ // WriteInvalidate completes). This is due to insufficient meta-
+ // data and overly presumptive interpretation of the inhibit flag.
+ // The result is an unnecessary extra writeback.
if (pkt->memInhibitAsserted())
blk->status |= BlkDirty;
}
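The caveat added in the second hunk can be read as a simplified, hypothetical sketch; the FillInfo type and fromWriteInvalidate field below are assumptions, not gem5 metadata. It shows why the inhibit flag alone cannot distinguish a genuinely dirty supplier from a WriteInvalidate MSHR whose data will be written back anyway, which is what produces the unnecessary extra writeback.

    #include <cstdint>

    enum BlkStatusBits : uint32_t { BlkValid = 0x01, BlkDirty = 0x02 };

    struct FillInfo {
        bool memInhibitAsserted;    // another agent supplied the data
        bool fromWriteInvalidate;   // hypothetical extra metadata gem5
                                    // does not have at this point
    };

    inline uint32_t markFill(const FillInfo &fill, uint32_t status)
    {
        status |= BlkValid;
        // handleFill() only sees the inhibit flag, so it presumes the
        // supplied data is dirty. When the supplier was a WriteInvalidate
        // MSHR the block is really Clean once that transaction writes back,
        // and the presumed-dirty copy later causes one extra writeback.
        if (fill.memInhibitAsserted /* && !fill.fromWriteInvalidate */)
            status |= BlkDirty;
        return status;
    }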