author    Andreas Hansson <andreas.hansson@arm.com>  2016-02-10 04:08:24 -0500
committer Andreas Hansson <andreas.hansson@arm.com>  2016-02-10 04:08:24 -0500
commit    986214f1816be2dc6f3758c4b80d8fbc945495b0 (patch)
tree      a2a087198ad96b4620e35f00e94e9c9c9d7ba5eb /src/mem/cache
parent    fbdeb6031664d71e19a25f51b6ee882d803dac30 (diff)
mem: Align how snoops are handled when hitting writebacks
This patch unifies the handling of snoops that hit writebacks with the handling of snoops that hit in the tags. As a result, we end up using the same optimisation as for normal snoops: we inform the downstream cache when we encounter a line in the Modified (writable and dirty) state, which allows us to avoid sending out express snoops to invalidate any Shared copies of the line. A few regression results change as a consequence, as some transactions are now sunk higher up in the cache hierarchy.
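For illustration, the decision this change centralises can be sketched in a few lines. The SnoopPkt and WbPkt structs below are hypothetical stand-ins for gem5's Packet class, and the booleans mirror the respond/have_writable/invalidate flags introduced in cache.cc; this is a minimal model of the protocol logic under those assumptions, not gem5's actual implementation.

    // Minimal C++ sketch of the unified snoop-hits-writeback decision.
    // SnoopPkt and WbPkt are hypothetical stand-ins for gem5's Packet.
    struct WbPkt {
        bool writebackDirty;      // WritebackDirty vs. WritebackClean/CleanEvict
        bool hasSharers = false;  // line no longer passed along as writable
    };

    struct SnoopPkt {
        bool needsResponse;
        bool isInvalidateReq;     // a bare InvalidateReq wants no data response
        bool isRead;
        bool isInvalidate;
        bool uncacheable;
        // response flags filled in by the snooped cache
        bool cacheResponding = false;
        bool hasSharers = false;
        bool responderHadWritable = false;
    };

    void snoopHitsWriteback(SnoopPkt &pkt, WbPkt &wb)
    {
        // respond only if we hold dirty data and a response is wanted
        bool respond = wb.writebackDirty && pkt.needsResponse &&
            !pkt.isInvalidateReq;
        // with no sharers, the writeback still carries the line as Modified
        bool have_writable = !wb.hasSharers;

        if (!pkt.uncacheable && pkt.isRead && !pkt.isInvalidate) {
            pkt.hasSharers = true;  // requester fills the line as Shared
            wb.hasSharers = true;   // receiver treats it as Owned, not Modified
        }

        if (respond) {
            pkt.cacheResponding = true;
            if (have_writable) {
                // tell the downstream cache the responder held the line in
                // Modified state, so it can skip express invalidation snoops
                pkt.responderHadWritable = true;
            }
        }
    }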
Diffstat (limited to 'src/mem/cache')
-rw-r--r--  src/mem/cache/cache.cc | 58
1 file changed, 22 insertions(+), 36 deletions(-)
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 294f807ba..7a5d22ef0 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -2152,48 +2152,34 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
             return;
         }
 
-        if (wb_pkt->cmd == MemCmd::WritebackDirty) {
-            // we have dirty data, and so will proceed to respond
+        // conceptually writebacks are no different to other blocks in
+        // this cache, so the behaviour is modelled after handleSnoop,
+        // the difference being that instead of querying the block
+        // state to determine if it is dirty and writable, we use the
+        // command and fields of the writeback packet
+        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
+            pkt->needsResponse() && pkt->cmd != MemCmd::InvalidateReq;
+        bool have_writable = !wb_pkt->hasSharers();
+        bool invalidate = pkt->isInvalidate();
+
+        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
+            assert(!pkt->needsWritable());
+            pkt->setHasSharers();
+            wb_pkt->setHasSharers();
+        }
+
+        if (respond) {
             pkt->setCacheResponding();
-            if (!pkt->needsWritable()) {
-                // the packet should end up in the Shared state (non
-                // writable) on the completion of the fill
-                pkt->setHasSharers();
-                // similarly, the writeback is no longer passing
-                // writeable (the receiving cache should consider the
-                // block Owned rather than Modified)
-                wb_pkt->setHasSharers();
-            } else {
-                // we need to invalidate our copy. we do that
-                // below.
-                assert(pkt->isInvalidate());
+
+            if (have_writable) {
+                pkt->setResponderHadWritable();
             }
+
             doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                    false, false);
-        } else {
-            // on hitting a clean writeback we play it safe and do not
-            // provide a response, the block may be dirty somewhere
-            // else
-            assert(wb_pkt->isCleanEviction());
-            // The cache technically holds the block until the
-            // corresponding message reaches the crossbar
-            // below. Therefore when a snoop encounters a CleanEvict
-            // or WritebackClean message we must call
-            // setHasSharers (just like when it encounters a
-            // Writeback) to avoid the snoop filter prematurely
-            // clearing the holder bit in the crossbar below
-            if (!pkt->needsWritable()) {
-                pkt->setHasSharers();
-                // the writeback is no longer passing writeable (the
-                // receiving cache should consider the block Owned
-                // rather than Modified)
-                wb_pkt->setHasSharers();
-            } else {
-                assert(pkt->isInvalidate());
-            }
         }
-        if (pkt->isInvalidate()) {
+        if (invalidate) {
             // Invalidation trumps our writeback... discard here
             // Note: markInService will remove entry from writeback buffer.
             markInService(wb_entry, false);