Diffstat (limited to 'src')
-rw-r--r--  src/mem/abstract_mem.cc      9
-rw-r--r--  src/mem/cache/cache_impl.hh  66
-rw-r--r--  src/mem/packet.hh            45
3 files changed, 85 insertions, 35 deletions
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index dca0403fb..ec1be04e1 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -379,6 +379,13 @@ AbstractMemory::access(PacketPtr pkt)
bytesRead[pkt->req->masterId()] += pkt->getSize();
if (pkt->req->isInstFetch())
bytesInstRead[pkt->req->masterId()] += pkt->getSize();
+ } else if (pkt->isInvalidate()) {
+ // no need to do anything
+ // this clause is intentionally before the write clause: the only
+ // transaction that is both a write and an invalidate is
+ // WriteInvalidate, and, for the sake of consistency, it does not
+ // write to memory. In a cacheless system there are no WriteInvalidates,
+ // because the Write -> WriteInvalidate rewrite happens in the cache.
} else if (pkt->isWrite()) {
if (writeOK(pkt)) {
if (pmemAddr) {
@@ -391,8 +398,6 @@ AbstractMemory::access(PacketPtr pkt)
numWrites[pkt->req->masterId()]++;
bytesWritten[pkt->req->masterId()] += pkt->getSize();
}
- } else if (pkt->isInvalidate()) {
- // no need to do anything
} else {
panic("unimplemented");
}
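
The ordering above matters: isInvalidate() is now tested before isWrite(), so a WriteInvalidate (which carries both attributes) falls into the do-nothing branch instead of touching memory. A minimal sketch of that dispatch order, using a hypothetical SimplePkt stand-in rather than gem5's Packet class; the real access() also handles LLSC, stats, and functional accesses:

#include <cstdio>

struct SimplePkt {              // hypothetical stand-in, not gem5's Packet
    bool read, write, invalidate;
    bool isRead() const { return read; }
    bool isWrite() const { return write; }
    bool isInvalidate() const { return invalidate; }
};

void access(const SimplePkt &pkt)
{
    if (pkt.isRead()) {
        std::puts("read from backing store");
    } else if (pkt.isInvalidate()) {
        // Checked before isWrite(): a WriteInvalidate is both a write and
        // an invalidate, but it must not write to memory here.
        std::puts("invalidate: nothing to do");
    } else if (pkt.isWrite()) {
        std::puts("write to backing store");
    }
}

int main()
{
    access({false, true, true});   // WriteInvalidate-like: invalidate branch
    access({false, true, false});  // plain write: write branch
}
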
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index b46717f14..f9eacb897 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -70,7 +70,7 @@ Cache<TagStore>::Cache(const Params *p)
: BaseCache(p),
tags(dynamic_cast<TagStore*>(p->tags)),
prefetcher(p->prefetcher),
- doFastWrites(false),
+ doFastWrites(true),
prefetchOnAccess(p->prefetch_on_access)
{
tempBlock = new BlkType();
@@ -167,7 +167,10 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
// isWrite() will be true for them
if (pkt->cmd == MemCmd::SwapReq) {
cmpAndSwap(blk, pkt);
- } else if (pkt->isWrite()) {
+ } else if (pkt->isWrite() &&
+ (!pkt->isWriteInvalidate() || isTopLevel)) {
+ assert(blk->isWritable());
+ // Write or WriteInvalidate at the first cache with block in Exclusive
if (blk->checkWrite(pkt)) {
pkt->writeDataToBlock(blk->data, blkSize);
}
@@ -176,6 +179,8 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
// appended themselves to this cache before knowing the store
// will fail.
blk->status |= BlkDirty;
+ DPRINTF(Cache, "%s for %s address %x size %d (write)\n", __func__,
+ pkt->cmdString(), pkt->getAddr(), pkt->getSize());
} else if (pkt->isRead()) {
if (pkt->isLLSC()) {
blk->trackLoadLocked(pkt);
@@ -229,13 +234,15 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
}
}
} else {
- // Not a read or write... must be an upgrade. it's OK
- // to just ack those as long as we have an exclusive
- // copy at this level.
- assert(pkt->isUpgrade());
+ // Upgrade or WriteInvalidate at a cache other than the one that
+ // received the request. Since we hold the block exclusively (E or M),
+ // we ack and then invalidate.
+ assert(pkt->isUpgrade() ||
+ (pkt->isWriteInvalidate() && !isTopLevel));
assert(blk != tempBlock);
tags->invalidate(blk);
blk->invalidate();
+ DPRINTF(Cache, "%s for %s address %x size %d (invalidation)\n",
+ __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
}
}
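
For reference, the three-way split that satisfyCpuSideRequest() now performs can be condensed as below. This is a schematic sketch: isTopLevel and isWriteInvalidate mirror the patch, the surrounding names are invented for the example.

#include <iostream>

enum class Action { WriteBlock, ReadBlock, AckAndInvalidate };

Action dispatch(bool isWrite, bool isRead, bool isWriteInvalidate, bool isTopLevel)
{
    if (isWrite && (!isWriteInvalidate || isTopLevel))
        return Action::WriteBlock;       // ordinary write, or WriteInvalidate
                                         // at the first (top-level) cache
    if (isRead)
        return Action::ReadBlock;
    return Action::AckAndInvalidate;     // Upgrade, or WriteInvalidate seen
                                         // by a lower cache holding E/M
}

int main()
{
    // WriteInvalidate at the top-level cache writes the block...
    std::cout << (dispatch(true, false, true, true) == Action::WriteBlock) << '\n';
    // ...while a lower-level cache just acks and invalidates its copy.
    std::cout << (dispatch(true, false, true, false) == Action::AckAndInvalidate) << '\n';
}
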
@@ -742,6 +749,8 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
// where the determination the StoreCond fails is delayed due to
// all caches not being on the same local bus.
cmd = MemCmd::SCUpgradeFailReq;
+ } else if (cpu_pkt->isWriteInvalidate()) {
+ cmd = cpu_pkt->cmd;
} else {
// block is invalid
cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
@@ -843,6 +852,14 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
if (bus_pkt->isError()) {
pkt->makeAtomicResponse();
pkt->copyError(bus_pkt);
+ } else if (pkt->isWriteInvalidate()) {
+ // note the use of pkt, not bus_pkt here.
+ if (isTopLevel) {
+ blk = handleFill(pkt, blk, writebacks);
+ satisfyCpuSideRequest(pkt, blk);
+ } else if (blk) {
+ satisfyCpuSideRequest(pkt, blk);
+ }
} else if (bus_pkt->isRead() ||
bus_pkt->cmd == MemCmd::UpgradeResp) {
// we're updating cache state to allow us to
@@ -1048,6 +1065,23 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
break; // skip response
}
+ // Unlike the other packet flows, where data is found in other
+ // caches or memory and brought back, write invalidates always
+ // have the data right away, so the "is fill?" question above
+ // cannot actually be answered until the stored MSHR state is
+ // examined. We "catch up" with that logic here, duplicating it
+ // from above.
+ if (target->pkt->isWriteInvalidate() && isTopLevel) {
+ assert(!is_error);
+
+ // NB: we use the original packet here and not the response!
+ mshr->handleFill(target->pkt, blk);
+ blk = handleFill(target->pkt, blk, writebacks);
+ assert(blk != NULL);
+
+ is_fill = true;
+ }
+
if (is_fill) {
satisfyCpuSideRequest(target->pkt, blk,
true, mshr->hasPostDowngrade());
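
The effect of the "catch up" step can be summarised as: a target counts as a fill either when the response-based check already said so, or when it is a WriteInvalidate handled at the top level, because the data rode in on the original request rather than the response. A small illustrative sketch; the names are invented for the example, not gem5 code:

#include <cassert>

bool countsAsFill(bool responseLooksLikeFill, bool targetIsWriteInvalidate,
                  bool isTopLevel)
{
    return responseLooksLikeFill || (targetIsWriteInvalidate && isTopLevel);
}

int main()
{
    // A top-level WriteInvalidate target fills the block even though the
    // response-based check would not have flagged it as a fill.
    assert(countsAsFill(false, true, true));
    // At a lower-level cache the block is not filled from the request.
    assert(!countsAsFill(false, true, false));
    return 0;
}
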
@@ -1138,7 +1172,8 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
}
if (blk && blk->isValid()) {
- if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
+ if ((pkt->isInvalidate() || mshr->hasPostInvalidate()) &&
+ (!pkt->isWriteInvalidate() || !isTopLevel)) {
assert(blk != tempBlock);
tags->invalidate(blk);
blk->invalidate();
@@ -1344,7 +1379,7 @@ typename Cache<TagStore>::BlkType*
Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
PacketList &writebacks)
{
- assert(pkt->isResponse());
+ assert(pkt->isResponse() || pkt->isWriteInvalidate());
Addr addr = pkt->getAddr();
bool is_secure = pkt->isSecure();
#if TRACING_ON
@@ -1355,8 +1390,10 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
// better have read new data...
assert(pkt->hasData());
- // only read reaponses have data
- assert(pkt->isRead());
+ // only read responses and (original) write invalidate req's have data;
+ // note that we don't write the data here for write invalidate - that
+ // happens in the subsequent satisfyCpuSideRequest.
+ assert(pkt->isRead() || pkt->isWriteInvalidate());
// need to do a replacement
blk = allocateBlock(addr, is_secure, writebacks);
@@ -1539,8 +1576,13 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
// we may end up modifying both the block state and the packet (if
// we respond in atomic mode), so just figure out what to do now
- // and then do it later.
- bool respond = blk->isDirty() && pkt->needsResponse();
+ // and then do it later. If we find dirty data while snooping for a
+ // WriteInvalidate, we don't care, since no merging needs to take place.
+ // We need the eviction to happen as normal, but the data needn't be
+ // sent anywhere, nor should the writeback be inhibited at the memory
+ // controller for any reason.
+ bool respond = blk->isDirty() && pkt->needsResponse()
+ && !pkt->isWriteInvalidate();
bool have_exclusive = blk->isWritable();
// Invalidate any prefetch's from below that would strip write permissions
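
The change to the respond predicate in handleSnoop() is small but easy to misread; the sketch below pulls it out as a standalone function (illustrative only, not the gem5 code) and exercises the interesting cases:

#include <cassert>

bool shouldRespond(bool blkDirty, bool needsResponse, bool isWriteInvalidate)
{
    // Dirty data is still written back as usual, but it is never supplied to
    // a WriteInvalidate requester: the full-line write makes merging pointless.
    return blkDirty && needsResponse && !isWriteInvalidate;
}

int main()
{
    assert(shouldRespond(true, true, false));    // normal dirty snoop hit responds
    assert(!shouldRespond(true, true, true));    // WriteInvalidate snoop does not
    assert(!shouldRespond(false, true, false));  // clean block: nothing to supply
    return 0;
}
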
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 9320d7886..8e3bcdd37 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -177,14 +177,16 @@ class MemCmd
public:
- bool isRead() const { return testCmdAttrib(IsRead); }
- bool isWrite() const { return testCmdAttrib(IsWrite); }
- bool isUpgrade() const { return testCmdAttrib(IsUpgrade); }
- bool isRequest() const { return testCmdAttrib(IsRequest); }
- bool isResponse() const { return testCmdAttrib(IsResponse); }
- bool needsExclusive() const { return testCmdAttrib(NeedsExclusive); }
- bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
- bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
+ bool isRead() const { return testCmdAttrib(IsRead); }
+ bool isWrite() const { return testCmdAttrib(IsWrite); }
+ bool isUpgrade() const { return testCmdAttrib(IsUpgrade); }
+ bool isRequest() const { return testCmdAttrib(IsRequest); }
+ bool isResponse() const { return testCmdAttrib(IsResponse); }
+ bool needsExclusive() const { return testCmdAttrib(NeedsExclusive); }
+ bool needsResponse() const { return testCmdAttrib(NeedsResponse); }
+ bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
+ bool isWriteInvalidate() const { return testCmdAttrib(IsWrite) &&
+ testCmdAttrib(IsInvalidate); }
/**
* Check if this particular packet type carries payload data. Note
@@ -495,19 +497,20 @@ class Packet : public Printable
/// Return the index of this command.
inline int cmdToIndex() const { return cmd.toInt(); }
- bool isRead() const { return cmd.isRead(); }
- bool isWrite() const { return cmd.isWrite(); }
- bool isUpgrade() const { return cmd.isUpgrade(); }
- bool isRequest() const { return cmd.isRequest(); }
- bool isResponse() const { return cmd.isResponse(); }
- bool needsExclusive() const { return cmd.needsExclusive(); }
- bool needsResponse() const { return cmd.needsResponse(); }
- bool isInvalidate() const { return cmd.isInvalidate(); }
- bool hasData() const { return cmd.hasData(); }
- bool isLLSC() const { return cmd.isLLSC(); }
- bool isError() const { return cmd.isError(); }
- bool isPrint() const { return cmd.isPrint(); }
- bool isFlush() const { return cmd.isFlush(); }
+ bool isRead() const { return cmd.isRead(); }
+ bool isWrite() const { return cmd.isWrite(); }
+ bool isUpgrade() const { return cmd.isUpgrade(); }
+ bool isRequest() const { return cmd.isRequest(); }
+ bool isResponse() const { return cmd.isResponse(); }
+ bool needsExclusive() const { return cmd.needsExclusive(); }
+ bool needsResponse() const { return cmd.needsResponse(); }
+ bool isInvalidate() const { return cmd.isInvalidate(); }
+ bool isWriteInvalidate() const { return cmd.isWriteInvalidate(); }
+ bool hasData() const { return cmd.hasData(); }
+ bool isLLSC() const { return cmd.isLLSC(); }
+ bool isError() const { return cmd.isError(); }
+ bool isPrint() const { return cmd.isPrint(); }
+ bool isFlush() const { return cmd.isFlush(); }
// Snoop flags
void assertMemInhibit()
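
Finally, note that isWriteInvalidate() is a derived attribute rather than a new command flag: it is simply the conjunction of IsWrite and IsInvalidate. A toy stand-in (not gem5's MemCmd) showing the composition:

#include <bitset>
#include <iostream>

enum Attrib { IsRead, IsWrite, IsInvalidate, NUM_ATTRIBS };

struct ToyCmd {                       // hypothetical stand-in for MemCmd
    std::bitset<NUM_ATTRIBS> attribs;
    bool testCmdAttrib(Attrib a) const { return attribs.test(a); }
    bool isWrite() const      { return testCmdAttrib(IsWrite); }
    bool isInvalidate() const { return testCmdAttrib(IsInvalidate); }
    // The new accessor is just the conjunction of the two existing
    // attributes, so WriteInvalidate needs no separate flag of its own.
    bool isWriteInvalidate() const
    { return testCmdAttrib(IsWrite) && testCmdAttrib(IsInvalidate); }
};

int main()
{
    ToyCmd writeInvalidate;
    writeInvalidate.attribs.set(IsWrite);
    writeInvalidate.attribs.set(IsInvalidate);

    ToyCmd plainWrite;
    plainWrite.attribs.set(IsWrite);

    std::cout << writeInvalidate.isWriteInvalidate() << ' '   // 1
              << plainWrite.isWriteInvalidate() << '\n';      // 0
}
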