summaryrefslogtreecommitdiff
path: root/src/mem/cache
diff options
context:
space:
mode:
Diffstat (limited to 'src/mem/cache')
-rw-r--r--src/mem/cache/cache_impl.hh39
-rw-r--r--src/mem/cache/mshr_queue.cc16
-rw-r--r--src/mem/cache/mshr_queue.hh6
3 files changed, 61 insertions, 0 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index e8e1876a0..00ba0d24f 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1394,6 +1394,12 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
if (snoopPkt.sharedAsserted()) {
pkt->assertShared();
}
+ // If this request is a prefetch and an
+ // upper level squashes the prefetch request,
+ // make sure to propagate the squash to the requester.
+ if (snoopPkt.prefetchSquashed()) {
+ pkt->setPrefetchSquashed();
+ }
} else {
cpuSidePort->sendAtomicSnoop(pkt);
if (!alreadyResponded && pkt->memInhibitAsserted()) {
@@ -1420,6 +1426,17 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
bool respond = blk->isDirty() && pkt->needsResponse();
bool have_exclusive = blk->isWritable();
+ // Invalidate any prefetches from below that would strip write permissions
+ // MemCmd::HardPFReq is only observed by upstream caches. After missing
+ // above and in its own cache, a new MemCmd::ReadReq is created that
+ // downstream caches observe.
+ if (pkt->cmd == MemCmd::HardPFReq) {
+ DPRINTF(Cache, "Squashing prefetch from lower cache %#x\n",
+ pkt->getAddr());
+ pkt->setPrefetchSquashed();
+ return;
+ }
+
if (pkt->isRead() && !invalidate) {
assert(!needs_exclusive);
pkt->assertShared();
@@ -1503,6 +1520,14 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
+ // Squash any prefetch requests from below on MSHR hits
+ if (mshr && pkt->cmd == MemCmd::HardPFReq) {
+ DPRINTF(Cache, "Squashing prefetch from lower cache on mshr hit %#x\n",
+ pkt->getAddr());
+ pkt->setPrefetchSquashed();
+ return;
+ }
+
// Let the MSHR itself track the snoop and decide whether we want
// to go ahead and do the regular cache snoop
if (mshr && mshr->handleSnoop(pkt, order++)) {
@@ -1730,6 +1755,20 @@ Cache<TagStore>::getTimingPacket()
snoop_pkt.senderState = mshr;
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
+ // Check to see if the prefetch was squashed by an upper cache
+ if (snoop_pkt.prefetchSquashed()) {
+ DPRINTF(Cache, "Prefetch squashed by upper cache. "
+ "Deallocating mshr target %#x.\n", mshr->addr);
+
+ // Deallocate the mshr target
+ if (mshr->queue->forceDeallocateTarget(mshr)) {
+ // Clear block if this deallocation freed an
+ // MSHR when all had previously been utilized
+ clearBlocked((BlockedCause)(mshr->queue->index));
+ }
+ return NULL;
+ }
+
if (snoop_pkt.memInhibitAsserted()) {
markInService(mshr, &snoop_pkt);
DPRINTF(Cache, "Upward snoop of prefetch for addr"
diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index 3150b4f5d..7bfbb90f5 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -232,6 +232,22 @@ MSHRQueue::markPending(MSHR *mshr)
mshr->readyIter = addToReadyList(mshr);
}
+bool
+MSHRQueue::forceDeallocateTarget(MSHR *mshr)
+{
+ bool was_full = isFull();
+ assert(mshr->hasTargets());
+ // Pop the prefetch off of the target list
+ mshr->popTarget();
+ // Delete mshr if no remaining targets
+ if (!mshr->hasTargets() && !mshr->promoteDeferredTargets()) {
+ deallocateOne(mshr);
+ }
+
+ // Notify if MSHR queue no longer full
+ return was_full && !isFull();
+}
+
void
MSHRQueue::squash(int threadNum)
{
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index 9177433af..7ab3c7e74 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -194,6 +194,12 @@ class MSHRQueue : public Drainable
void squash(int threadNum);
/**
+ * Deallocate the top target, possibly freeing the MSHR
+ * @return true if the queue was full and this freed capacity
+ */
+ bool forceDeallocateTarget(MSHR *mshr);
+
+ /**
* Returns true if the pending list is not empty.
* @return True if there are outstanding requests.
*/