path: root/src/mem/cache/cache_impl.hh
author    Mitch Hayenga <mitch.hayenga@arm.com>  2014-05-09 18:58:46 -0400
committer Mitch Hayenga <mitch.hayenga@arm.com>  2014-05-09 18:58:46 -0400
commit    a15b713cba52d9d4d2c1204fef050fb3856ca33e (patch)
tree      4bd5a1e3fda3aa33feb10cc475f8b8554f3d0958 /src/mem/cache/cache_impl.hh
parent    b9e6c260a01bab2b59d6eef4f45a642f57484275 (diff)
download  gem5-a15b713cba52d9d4d2c1204fef050fb3856ca33e.tar.xz
mem: Squash prefetch requests from downstream caches
This patch squashes prefetch requests from downstream caches so that they do not steal cache lines away from caches closer to the CPU. It was originally coded by Mitch Hayenga and modified by Aasheesh Kolli.
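The diff carries the squash decision on a flag attached to the snoop packet: before issuing a hardware prefetch, the lower cache snoops upward, and any upper-level cache that holds the line (or already has an MSHR allocated for it) marks the packet squashed, after which the requester deallocates its MSHR target instead of fetching the line. The standalone sketch below illustrates that handshake under simplifying assumptions: Packet, UpperCache, isHardPrefetch, and the driver in main() are illustrative stand-ins rather than gem5's real classes; only the setPrefetchSquashed()/prefetchSquashed() accessor names mirror what this patch introduces.

// Minimal sketch (not gem5 code) of the squash-flag handshake.
#include <cstdint>
#include <iostream>
#include <vector>

struct Packet {
    uint64_t addr;
    bool isHardPrefetch;       // stands in for cmd == MemCmd::HardPFReq
    bool squashed = false;     // stands in for the new packet flag
    void setPrefetchSquashed() { squashed = true; }
    bool prefetchSquashed() const { return squashed; }
};

struct UpperCache {
    std::vector<uint64_t> lines;  // addresses held (or pending in MSHRs)

    // Snoop handler: an upper-level hit squashes a downstream prefetch
    // so the line is not stolen away from the cache closer to the CPU.
    void handleSnoop(Packet &pkt) {
        if (!pkt.isHardPrefetch)
            return;
        for (uint64_t a : lines) {
            if (a == pkt.addr) {
                pkt.setPrefetchSquashed();
                return;
            }
        }
    }
};

int main() {
    UpperCache l1{{0x1000}};                  // L1 already holds the line
    Packet pf{0x1000, /*isHardPrefetch=*/true};
    l1.handleSnoop(pf);                       // upward snoop before issue
    if (pf.prefetchSquashed())
        std::cout << "prefetch squashed; deallocate MSHR target\n";
    else
        std::cout << "prefetch proceeds to memory\n";
}

In the patch itself, the requester performs this check in getTimingPacket() (last hunk below) and drops the MSHR target when the flag comes back set.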
Diffstat (limited to 'src/mem/cache/cache_impl.hh')
-rw-r--r--  src/mem/cache/cache_impl.hh  39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index e8e1876a0..00ba0d24f 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1394,6 +1394,12 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
if (snoopPkt.sharedAsserted()) {
pkt->assertShared();
}
+ // If this request is a prefetch and an
+ // upper level squashes the prefetch request,
+ // make sure to propagate the squash to the requester.
+ if (snoopPkt.prefetchSquashed()) {
+ pkt->setPrefetchSquashed();
+ }
} else {
cpuSidePort->sendAtomicSnoop(pkt);
if (!alreadyResponded && pkt->memInhibitAsserted()) {
@@ -1420,6 +1426,17 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
bool respond = blk->isDirty() && pkt->needsResponse();
bool have_exclusive = blk->isWritable();
+ // Invalidate any prefetches from below that would strip write permissions.
+ // MemCmd::HardPFReq is only observed by upstream caches. After missing
+ // above and in its own cache, a new MemCmd::ReadReq is created that
+ // downstream caches observe.
+ if (pkt->cmd == MemCmd::HardPFReq) {
+ DPRINTF(Cache, "Squashing prefetch from lower cache %#x\n",
+ pkt->getAddr());
+ pkt->setPrefetchSquashed();
+ return;
+ }
+
if (pkt->isRead() && !invalidate) {
assert(!needs_exclusive);
pkt->assertShared();
@@ -1503,6 +1520,14 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
+ // Squash any prefetch requests from below on MSHR hits
+ if (mshr && pkt->cmd == MemCmd::HardPFReq) {
+ DPRINTF(Cache, "Squashing prefetch from lower cache on mshr hit %#x\n",
+ pkt->getAddr());
+ pkt->setPrefetchSquashed();
+ return;
+ }
+
// Let the MSHR itself track the snoop and decide whether we want
// to go ahead and do the regular cache snoop
if (mshr && mshr->handleSnoop(pkt, order++)) {
@@ -1730,6 +1755,20 @@ Cache<TagStore>::getTimingPacket()
snoop_pkt.senderState = mshr;
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
+ // Check to see if the prefetch was squashed by an upper cache
+ if (snoop_pkt.prefetchSquashed()) {
+ DPRINTF(Cache, "Prefetch squashed by upper cache. "
+ "Deallocating mshr target %#x.\n", mshr->addr);
+
+ // Deallocate the mshr target
+ if (mshr->queue->forceDeallocateTarget(mshr)) {
+ // Clear the blocked flag if this deallocation freed an
+ // MSHR when all had previously been utilized
+ clearBlocked((BlockedCause)(mshr->queue->index));
+ }
+ return NULL;
+ }
+
if (snoop_pkt.memInhibitAsserted()) {
markInService(mshr, &snoop_pkt);
DPRINTF(Cache, "Upward snoop of prefetch for addr"