author | Curtis Dunham <Curtis.Dunham@arm.com> | 2014-10-21 17:04:41 -0500
committer | Curtis Dunham <Curtis.Dunham@arm.com> | 2014-10-21 17:04:41 -0500
commit | 4024fab7fc16223018a48af4a19efeec865c889b (patch)
tree | 8a2285b72370de07b44e7b452f30ace2c19a32b2 /src/mem
parent | 46f9f11a5575a0ec8a69a1c0181b37668de65152 (diff)
download | gem5-4024fab7fc16223018a48af4a19efeec865c889b.tar.xz
mem: don't inhibit WriteInv's or defer snoops on their MSHRs
WriteInvalidate semantics depend on the unconditional writeback, or these
transactions won't complete. There's also no point in deferring snoops on
their MSHRs, as they don't get new data at the end of their life cycle the
way other transactions do.

Add a comment in the cache about a minor inefficiency re: WriteInvalidate.
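
To make the snoop-deferral rule concrete, here is a minimal compilable sketch of the decision this patch implements. The names (`MiniMSHR`, `Cmd`) are illustrative stand-ins, not gem5's actual `MSHR`/`MemCmd` API; the real logic lives in `src/mem/cache/mshr.cc` in the diff below.

```cpp
#include <iostream>

// Illustrative stand-in for gem5's MemCmd; only the two commands that
// matter for this decision are modeled.
enum class Cmd { ReadExReq, WriteInvalidateReq };

struct MiniMSHR {
    bool pendingClean = false;    // set at allocate() for WriteInvalidateReq
    bool postInvalidate = false;  // a snoop will take away our pending copy

    void allocate(Cmd cmd) {
        pendingClean = (cmd == Cmd::WriteInvalidateReq);
    }

    // Returns true if an exclusive snoop must be deferred until this MSHR
    // completes. Per the patch, a WriteInvalidate MSHR never defers: its
    // data is already final and will be written back unconditionally.
    bool handleSnoop(bool snoopNeedsExclusive) {
        if (!snoopNeedsExclusive)
            return false;
        postInvalidate = true;        // still record the invalidation
        return !pendingClean;         // defer only if we'd end up dirty
    }
};

int main() {
    MiniMSHR wi, rdex;
    wi.allocate(Cmd::WriteInvalidateReq);
    rdex.allocate(Cmd::ReadExReq);
    std::cout << wi.handleSnoop(true) << "\n";    // 0: answered now
    std::cout << rdex.handleSnoop(true) << "\n";  // 1: deferred
}
```

The key point is the final `return !pendingClean`: an exclusive snoop still sets `postInvalidate`, but a WriteInvalidate MSHR answers it immediately instead of deferring it.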
Diffstat (limited to 'src/mem')
-rw-r--r-- | src/mem/cache/cache_impl.hh | 10
-rw-r--r-- | src/mem/cache/mshr.cc | 27
-rw-r--r-- | src/mem/cache/mshr.hh | 7

3 files changed, 35 insertions, 9 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 4b51a6359..66abf6eff 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -345,7 +345,6 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
                 blk->status |= BlkSecure;
             }
         }
-        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
         if (pkt->cmd == MemCmd::Writeback) {
             blk->status |= BlkDirty;
             if (pkt->isSupplyExclusive()) {
@@ -354,11 +353,11 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
             // nothing else to do; writeback doesn't expect response
             assert(!pkt->needsResponse());
         } else if (pkt->cmd == MemCmd::WriteInvalidateReq) {
-            assert(blk->isReadable()); // implicitly checks for Valid bit also
-            blk->status |= (BlkDirty | BlkCanGoExclusive);
+            blk->status |= (BlkReadable | BlkDirty | BlkCanGoExclusive);
             blk->status &= ~BlkWritable;
             ++fastWrites;
         }
+        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
         DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
         incHitCount(pkt);
         return true;
@@ -1517,6 +1516,11 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
             // there are cases (such as failed store conditionals or
             // compare-and-swaps) where we'll demand an exclusive copy but
             // end up not writing it.
+            // Caveat: if a Read takes a value from a WriteInvalidate MSHR,
+            // it will get marked Dirty even though it is Clean (once the
+            // WriteInvalidate completes). This is due to insufficient meta-
+            // data and overly presumptive interpretation of the inhibit flag.
+            // The result is an unnecessary extra writeback.
             if (pkt->memInhibitAsserted())
                 blk->status |= BlkDirty;
         }
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index fcf9fc425..bc46ed267 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -62,10 +62,11 @@ using namespace std;
 MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
-               pendingDirty(false), postInvalidate(false), postDowngrade(false),
-               _isObsolete(false), queue(NULL), order(0), addr(0), size(0),
-               isSecure(false), inService(false), isForward(false),
-               threadNum(InvalidThreadID), data(NULL)
+               pendingDirty(false), pendingClean(false),
+               postInvalidate(false), postDowngrade(false),
+               _isObsolete(false), queue(NULL), order(0), addr(0),
+               size(0), isSecure(false), inService(false),
+               isForward(false), threadNum(InvalidThreadID), data(NULL)
 {
 }
@@ -213,6 +214,7 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target, Tick whenReady,
     isForward = false;
     _isUncacheable = target->req->isUncacheable();
     inService = false;
+    pendingClean = (target->cmd == MemCmd::WriteInvalidateReq);
     downstreamPending = false;
     _isObsolete = false;
     threadNum = 0;
@@ -251,7 +253,8 @@ MSHR::markInService(PacketPtr pkt)
     assert(pkt != NULL);
 
     inService = true;
-    pendingDirty = (targets.needsExclusive ||
+    pendingDirty = ((targets.needsExclusive &&
+                     (pkt->cmd != MemCmd::WriteInvalidateReq)) ||
                     (!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
     postInvalidate = postDowngrade = false;
@@ -367,7 +370,12 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
         targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                     downstreamPending && targets.needsExclusive);
 
-        if (isPendingDirty()) {
+        // WriteInvalidates must writeback and should not be inhibited on
+        // account of its snoops discovering MSHRs wanting exclusive access
+        // to what it wrote. We don't want to push this check higher,
+        // however, because we want to be sure to add an invalidating
+        // Target::FromSnoop, above.
+        if (isPendingDirty() && (pkt->cmd != MemCmd::WriteInvalidateReq)) {
             pkt->assertMemInhibit();
             pkt->setSupplyExclusive();
         }
@@ -375,6 +383,13 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
 
     if (pkt->needsExclusive()) {
         // This transaction will take away our pending copy
         postInvalidate = true;
+
+        // Do not defer (i.e. return true) the snoop if the block is
+        // going to be clean once the MSHR completes, as the data is
+        // ready now.
+        if (isPendingClean()) {
+            return false;
+        }
     }
 }
diff --git a/src/mem/cache/mshr.hh b/src/mem/cache/mshr.hh
index 9ea9cfb2c..e5a15b61d 100644
--- a/src/mem/cache/mshr.hh
+++ b/src/mem/cache/mshr.hh
@@ -83,6 +83,9 @@ class MSHR : public Packet::SenderState, public Printable
     /** Will we have a dirty copy after this request? */
     bool pendingDirty;
 
+    /** Will we have a clean copy after this request? (i.e. is writeback) */
+    bool pendingClean;
+
     /** Did we snoop an invalidate while waiting for data? */
     bool postInvalidate;
@@ -179,6 +182,10 @@ class MSHR : public Packet::SenderState, public Printable
         assert(inService);
         return pendingDirty;
     }
+    bool isPendingClean() const {
+        return pendingClean;
+    }
+
     bool hasPostInvalidate() const {
         assert(inService);
         return postInvalidate;
     }
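
For reference, the new `markInService()` predicate reads more clearly in isolation. The following is a hedged sketch with the packet and target state flattened into booleans (the real code takes a `PacketPtr` and consults `targets.needsExclusive`), showing why a WriteInvalidate no longer counts as pending-dirty:

```cpp
#include <cassert>

// Sketch of the pendingDirty computation after this patch: needing
// exclusive access no longer implies a pending dirty copy when the
// request is a WriteInvalidate, because a WriteInvalidate writes the
// block back unconditionally and therefore finishes clean.
static bool computePendingDirty(bool needsExclusive, bool isWriteInvalidate,
                                bool sharedAsserted, bool memInhibitAsserted) {
    return (needsExclusive && !isWriteInvalidate) ||
           (!sharedAsserted && memInhibitAsserted);
}

int main() {
    // A ReadExReq that needs exclusive access will hold a dirty copy.
    assert(computePendingDirty(true, false, false, false));
    // A WriteInvalidateReq needs exclusive access but ends up clean,
    // so it must not inhibit other caches' requests on its behalf.
    assert(!computePendingDirty(true, true, false, false));
    return 0;
}
```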