path: root/src/mem
author     Mitch Hayenga <mitch.hayenga@arm.com>   2014-05-09 18:58:46 -0400
committer  Mitch Hayenga <mitch.hayenga@arm.com>   2014-05-09 18:58:46 -0400
commit     a15b713cba52d9d4d2c1204fef050fb3856ca33e (patch)
tree       4bd5a1e3fda3aa33feb10cc475f8b8554f3d0958 /src/mem
parent     b9e6c260a01bab2b59d6eef4f45a642f57484275 (diff)
download   gem5-a15b713cba52d9d4d2c1204fef050fb3856ca33e.tar.xz
mem: Squash prefetch requests from downstream caches
This patch squashes prefetch requests from downstream caches so that they do not steal cache lines away from caches closer to the CPU. It was originally coded by Mitch Hayenga and modified by Aasheesh Kolli.
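The standalone sketch below is an illustration of the flag-based squash handshake the patch implements, not gem5 source: a hardware prefetch from a downstream cache is snooped upward, the upper cache marks it squashed, and the requester then drops it. Only the names PREFETCH_SNOOP_SQUASH, setPrefetchSquashed() and prefetchSquashed(), and the HardPFReq condition, mirror the diff below; MockPacket, MockUpperCache and main() are hypothetical stand-ins.

// Standalone illustration with hypothetical mock types; not gem5 code.
#include <cstdint>
#include <iostream>

struct MockPacket {
    // Mirrors the PREFETCH_SNOOP_SQUASH flag added to src/mem/packet.hh
    static constexpr uint32_t PREFETCH_SNOOP_SQUASH = 0x00010000;

    uint32_t flags = 0;
    bool isHardPFReq = false;   // stands in for cmd == MemCmd::HardPFReq

    void setPrefetchSquashed()    { flags |= PREFETCH_SNOOP_SQUASH; }
    bool prefetchSquashed() const { return (flags & PREFETCH_SNOOP_SQUASH) != 0; }
};

// Stand-in for a cache closer to the CPU that snoops a prefetch
// issued by the cache below it (the express-snoop path).
struct MockUpperCache {
    bool ownsLineOrHasMshrMatch;

    void handleSnoop(MockPacket &pkt) const {
        // A downstream hardware prefetch that would steal (or strip write
        // permission from) a line this cache already holds gets squashed.
        if (pkt.isHardPFReq && ownsLineOrHasMshrMatch)
            pkt.setPrefetchSquashed();
    }
};

int main() {
    MockUpperCache l1{true};   // upper cache already owns the line

    MockPacket pf;             // downstream prefetch snooped upward
    pf.isHardPFReq = true;

    l1.handleSnoop(pf);

    if (pf.prefetchSquashed())
        std::cout << "requester drops the prefetch and frees its MSHR target\n";
    else
        std::cout << "requester proceeds with the prefetch\n";
    return 0;
}

In the actual patch, the requesting cache reacts to the squash in getTimingPacket() by deallocating the corresponding MSHR target via MSHRQueue::forceDeallocateTarget(), as shown in the diff below.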
Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/cache/cache_impl.hh  39
-rw-r--r--  src/mem/cache/mshr_queue.cc  16
-rw-r--r--  src/mem/cache/mshr_queue.hh   6
-rw-r--r--  src/mem/packet.hh             4
4 files changed, 65 insertions, 0 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index e8e1876a0..00ba0d24f 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1394,6 +1394,12 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
             if (snoopPkt.sharedAsserted()) {
                 pkt->assertShared();
             }
+            // If this request is a prefetch and an
+            // upper level squashes the prefetch request,
+            // make sure to propagate the squash to the requester.
+            if (snoopPkt.prefetchSquashed()) {
+                pkt->setPrefetchSquashed();
+            }
         } else {
             cpuSidePort->sendAtomicSnoop(pkt);
             if (!alreadyResponded && pkt->memInhibitAsserted()) {
@@ -1420,6 +1426,17 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
     bool respond = blk->isDirty() && pkt->needsResponse();
     bool have_exclusive = blk->isWritable();
 
+    // Squash any prefetches from below that would strip write permissions.
+    // MemCmd::HardPFReq is only observed by upstream caches. After missing
+    // above and in its own cache, a new MemCmd::ReadReq is created that
+    // downstream caches observe.
+    if (pkt->cmd == MemCmd::HardPFReq) {
+        DPRINTF(Cache, "Squashing prefetch from lower cache %#x\n",
+                pkt->getAddr());
+        pkt->setPrefetchSquashed();
+        return;
+    }
+
     if (pkt->isRead() && !invalidate) {
         assert(!needs_exclusive);
         pkt->assertShared();
@@ -1503,6 +1520,14 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
     Addr blk_addr = blockAlign(pkt->getAddr());
     MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
 
+    // Squash any prefetch requests from below on MSHR hits
+    if (mshr && pkt->cmd == MemCmd::HardPFReq) {
+        DPRINTF(Cache, "Squashing prefetch from lower cache on mshr hit %#x\n",
+                pkt->getAddr());
+        pkt->setPrefetchSquashed();
+        return;
+    }
+
     // Let the MSHR itself track the snoop and decide whether we want
     // to go ahead and do the regular cache snoop
     if (mshr && mshr->handleSnoop(pkt, order++)) {
@@ -1730,6 +1755,20 @@ Cache<TagStore>::getTimingPacket()
             snoop_pkt.senderState = mshr;
             cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
 
+            // Check to see if the prefetch was squashed by an upper cache
+            if (snoop_pkt.prefetchSquashed()) {
+                DPRINTF(Cache, "Prefetch squashed by upper cache. "
+                        "Deallocating mshr target %#x.\n", mshr->addr);
+
+                // Deallocate the mshr target
+                if (mshr->queue->forceDeallocateTarget(mshr)) {
+                    // Clear block if this deallocation freed an mshr
+                    // when all had previously been utilized
+                    clearBlocked((BlockedCause)(mshr->queue->index));
+                }
+                return NULL;
+            }
+
             if (snoop_pkt.memInhibitAsserted()) {
                 markInService(mshr, &snoop_pkt);
                 DPRINTF(Cache, "Upward snoop of prefetch for addr"
diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index 3150b4f5d..7bfbb90f5 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -232,6 +232,22 @@ MSHRQueue::markPending(MSHR *mshr)
     mshr->readyIter = addToReadyList(mshr);
 }
 
+bool
+MSHRQueue::forceDeallocateTarget(MSHR *mshr)
+{
+    bool was_full = isFull();
+    assert(mshr->hasTargets());
+    // Pop the prefetch off of the target list
+    mshr->popTarget();
+    // Delete mshr if no remaining targets
+    if (!mshr->hasTargets() && !mshr->promoteDeferredTargets()) {
+        deallocateOne(mshr);
+    }
+
+    // Notify if MSHR queue no longer full
+    return was_full && !isFull();
+}
+
 void
 MSHRQueue::squash(int threadNum)
 {
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index 9177433af..7ab3c7e74 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -194,6 +194,12 @@ class MSHRQueue : public Drainable
     void squash(int threadNum);
 
     /**
+     * Deallocate the top target, possibly freeing the MSHR
+     * @return True if the MSHR queue is no longer full
+     */
+    bool forceDeallocateTarget(MSHR *mshr);
+
+    /**
      * Returns true if the pending list is not empty.
      * @return True if there are outstanding requests.
      */
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 4bdcc9a93..7b9e05945 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -260,6 +260,8 @@ class Packet : public Printable
     /// suppress the error if this packet encounters a functional
     /// access failure.
     static const FlagsType SUPPRESS_FUNC_ERROR   = 0x00008000;
+    // Signal prefetch squash through express snoop flag
+    static const FlagsType PREFETCH_SNOOP_SQUASH = 0x00010000;
 
     Flags flags;
@@ -522,6 +524,8 @@ class Packet : public Printable
     bool isSupplyExclusive() const { return flags.isSet(SUPPLY_EXCLUSIVE); }
     void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
     bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
+    void setPrefetchSquashed() { flags.set(PREFETCH_SNOOP_SQUASH); }
+    bool prefetchSquashed() const { return flags.isSet(PREFETCH_SNOOP_SQUASH); }
 
     // Network error conditions... encapsulate them as methods since
     // their encoding keeps changing (from result field to command