path: root/src/mem/cache
author    Andreas Hansson <andreas.hansson@arm.com>  2015-12-31 09:32:58 -0500
committer Andreas Hansson <andreas.hansson@arm.com>  2015-12-31 09:32:58 -0500
commit    0fcb376e5fc6bc0a7b16dc4595d4a7e3f910cbc8 (patch)
tree      4be665e3596d9d4e193e6354c5577ee0077732da /src/mem/cache
parent    a3177645773b8eb4b835050c395554d3e2b4664a (diff)
download  gem5-0fcb376e5fc6bc0a7b16dc4595d4a7e3f910cbc8.tar.xz
mem: Make cache terminology easier to understand
This patch changes the name of a number of packet flags and MSHR member functions and variables to make the coherency protocol easier to understand. In addition, the patch adds and updates many descriptions, explicitly spelling out assumptions.

The following name changes are made:

* the packet memInhibit flag is renamed to cacheResponding
* the packet sharedAsserted flag is renamed to hasSharers
* the packet NeedsExclusive attribute is renamed to NeedsWritable
* the packet isSupplyExclusive is renamed to responderHadWritable
* the MSHR pendingDirty is renamed to pendingModified

The cache states Modified, Owned, Exclusive and Shared are also called out in the cache and MSHR code to make it easier to understand.
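A quick before/after of the renamed calls, as they appear throughout the diff below (illustrative fragments only; the surrounding cache logic is elided):

    // before this patch                  // after this patch
    pkt->assertMemInhibit();              pkt->setCacheResponding();
    pkt->memInhibitAsserted();            pkt->cacheResponding();
    pkt->assertShared();                  pkt->setHasSharers();
    pkt->sharedAsserted();                pkt->hasSharers();
    pkt->needsExclusive();                pkt->needsWritable();
    pkt->setSupplyExclusive();            pkt->setResponderHadWritable();
    pkt->isSupplyExclusive();             pkt->responderHadWritable();
    mshr->isPendingDirty();               mshr->isPendingModified();
    mshr->promoteExclusive();             mshr->promoteWritable();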
Diffstat (limited to 'src/mem/cache')
-rw-r--r--  src/mem/cache/base.hh          4
-rw-r--r--  src/mem/cache/blk.hh          13
-rw-r--r--  src/mem/cache/cache.cc       441
-rw-r--r--  src/mem/cache/cache.hh        15
-rw-r--r--  src/mem/cache/mshr.cc        100
-rw-r--r--  src/mem/cache/mshr.hh         45
-rw-r--r--  src/mem/cache/mshr_queue.cc    4
-rw-r--r--  src/mem/cache/mshr_queue.hh    6
8 files changed, 358 insertions(+), 270 deletions(-)
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 5f6456fb9..8cd932f01 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -224,11 +224,11 @@ class BaseCache : public MemObject
return mshr;
}
- void markInServiceInternal(MSHR *mshr, bool pending_dirty_resp)
+ void markInServiceInternal(MSHR *mshr, bool pending_modified_resp)
{
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
- mq->markInService(mshr, pending_dirty_resp);
+ mq->markInService(mshr, pending_modified_resp);
if (wasFull && !mq->isFull()) {
clearBlocked((BlockedCause)mq->index);
}
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index 2b3a34bb8..a32edbf20 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -281,7 +281,7 @@ class CacheBlk
/**
* Pretty-print a tag, and interpret state bits to readable form
- * including mapping to a MOESI stat.
+ * including mapping to a MOESI state.
*
* @return string with basic state information
*/
@@ -299,6 +299,17 @@ class CacheBlk
* E 1 0 1
* S 0 0 1
* I 0 0 0
+ *
+ * Note that only one cache ever has a block in Modified or
+ * Owned state, i.e., only one cache owns the block, or
+ * equivalently has the BlkDirty bit set. However, multiple
+ * caches on the same path to memory can have a block in the
+ * Exclusive state (despite the name). Exclusive means this
+ * cache has the only copy at this level of the hierarchy,
+ * i.e., there may be copies in caches above this cache (in
+ * various states), but there are no peers that have copies on
+ * this branch of the hierarchy, and no caches at or above
+ * this level on any other branch have copies either.
**/
unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
char s = '?';
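For reference, the bit-packing in the hunk above can be exercised in isolation. The following is a minimal, self-contained sketch (moesiState is a hypothetical helper, not part of this patch) that mirrors the state table from the comment:

    #include <cstdio>

    // map the packed (writable, dirty, valid) bits to a MOESI letter,
    // using the same packing as CacheBlk::print() above
    char moesiState(bool writable, bool dirty, bool valid)
    {
        unsigned state = writable << 2 | dirty << 1 | valid;
        switch (state) {
          case 0b111: return 'M'; // Modified: writable and dirty
          case 0b011: return 'O'; // Owned: dirty but not writable
          case 0b101: return 'E'; // Exclusive: writable and clean
          case 0b001: return 'S'; // Shared: clean, read-only copy
          case 0b000: return 'I'; // Invalid
          default:    return '?'; // transient or illegal encoding
        }
    }

    int main()
    {
        std::printf("%c %c\n", moesiState(true, false, true),  // E
                    moesiState(false, true, true));            // O
        return 0;
    }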
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index c10315499..ecdf4c855 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -157,7 +157,7 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
// can satisfy a following ReadEx anyway since we can rely on the
// Read requester(s) to have buffered the ReadEx snoop and to
// invalidate their blocks after receiving them.
- // assert(!pkt->needsExclusive() || blk->isWritable());
+ // assert(!pkt->needsWritable() || blk->isWritable());
assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
// Check RMW operations first since both isRead() and
@@ -165,15 +165,19 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
if (pkt->cmd == MemCmd::SwapReq) {
cmpAndSwap(blk, pkt);
} else if (pkt->isWrite()) {
+ // we have the block in a writable state and can go ahead,
+ // note that the line may also be considered writable in
+ // downstream caches along the path to memory, but always
+ // Exclusive, and never Modified
assert(blk->isWritable());
- // Write or WriteLine at the first cache with block in Exclusive
+ // Write or WriteLine at the first cache with block in writable state
if (blk->checkWrite(pkt)) {
pkt->writeDataToBlock(blk->data, blkSize);
}
- // Always mark the line as dirty even if we are a failed
- // StoreCond so we supply data to any snoops that have
- // appended themselves to this cache before knowing the store
- // will fail.
+ // Always mark the line as dirty (and thus transition to the
+ // Modified state) even if we are a failed StoreCond so we
+ // supply data to any snoops that have appended themselves to
+ // this cache before knowing the store will fail.
blk->status |= BlkDirty;
DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -193,79 +197,80 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
assert(pkt->getSize() == blkSize);
// special handling for coherent block requests from
// upper-level caches
- if (pkt->needsExclusive()) {
+ if (pkt->needsWritable()) {
// sanity check
assert(pkt->cmd == MemCmd::ReadExReq ||
pkt->cmd == MemCmd::SCUpgradeFailReq);
// if we have a dirty copy, make sure the recipient
- // keeps it marked dirty
+ // keeps it marked dirty (in the modified state)
if (blk->isDirty()) {
- pkt->assertMemInhibit();
+ pkt->setCacheResponding();
}
// on ReadExReq we give up our copy unconditionally,
// even if this cache is mostly inclusive, we may want
// to revisit this
invalidateBlock(blk);
} else if (blk->isWritable() && !pending_downgrade &&
- !pkt->sharedAsserted() &&
+ !pkt->hasSharers() &&
pkt->cmd != MemCmd::ReadCleanReq) {
- // we can give the requester an exclusive copy (by not
- // asserting shared line) on a read request if:
- // - we have an exclusive copy at this level (& below)
+ // we can give the requester a writable copy on a read
+ // request if:
+ // - we have a writable copy at this level (& below)
// - we don't have a pending snoop from below
// signaling another read request
// - no other cache above has a copy (otherwise it
- // would have asseretd shared line on request)
- // - we are not satisfying an instruction fetch (this
- // prevents dirty data in the i-cache)
-
+ // would have set hasSharers flag when
+ // snooping the packet)
+ // - the read has not explicitly asked for a clean
+ // copy of the line
if (blk->isDirty()) {
// special considerations if we're owner:
if (!deferred_response) {
- // if we are responding immediately and can
- // signal that we're transferring ownership
- // (inhibit set) along with exclusivity
- // (shared not set), do so
- pkt->assertMemInhibit();
-
- // if this cache is mostly inclusive, we keep
- // the block as writable (exclusive), and pass
- // it upwards as writable and dirty
- // (modified), hence we have multiple caches
- // considering the same block writable,
- // something that we get away with due to the
- // fact that: 1) this cache has been
- // considered the ordering points and
- // responded to all snoops up till now, and 2)
- // we always snoop upwards before consulting
- // the local cache, both on a normal request
- // (snooping done by the crossbar), and on a
- // snoop
- blk->status &= ~BlkDirty;
-
- // if this cache is mostly exclusive with
- // respect to the cache above, drop the block
+ // respond with the line in Modified state
+ // (cacheResponding set, hasSharers not set)
+ pkt->setCacheResponding();
+
if (clusivity == Enums::mostly_excl) {
+ // if this cache is mostly exclusive with
+ // respect to the cache above, drop the
+ // block, no need to first unset the dirty
+ // bit
invalidateBlock(blk);
+ } else {
+ // if this cache is mostly inclusive, we
+ // keep the block in the Exclusive state,
+ // and pass it upwards as Modified
+ // (writable and dirty), hence we have
+ // multiple caches, all on the same path
+ // towards memory, all considering the
+ // same block writable, but only one
+ // considering it Modified
+
+ // we get away with multiple caches (on
+ // the same path to memory) considering
+ // the block writable as we always enter
+ // the cache hierarchy through a cache,
+ // and first snoop upwards in all other
+ // branches
+ blk->status &= ~BlkDirty;
}
} else {
// if we're responding after our own miss,
// there's a window where the recipient didn't
// know it was getting ownership and may not
// have responded to snoops correctly, so we
- // can't pass off ownership *or* exclusivity
- pkt->assertShared();
+ // have to respond with a shared line
+ pkt->setHasSharers();
}
}
} else {
// otherwise only respond with a shared copy
- pkt->assertShared();
+ pkt->setHasSharers();
}
}
} else {
- // Upgrade or Invalidate, since we have it Exclusively (E or
- // M), we ack then invalidate.
+ // Upgrade or Invalidate
assert(pkt->isUpgrade() || pkt->isInvalidate());
// for invalidations we could be looking at the temp block
@@ -285,9 +290,9 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
void
-Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
+Cache::markInService(MSHR *mshr, bool pending_modified_resp)
{
- markInServiceInternal(mshr, pending_dirty_resp);
+ markInServiceInternal(mshr, pending_modified_resp);
}
/////////////////////////////////////////////////////
@@ -420,9 +425,10 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
if (pkt->cmd == MemCmd::WritebackDirty) {
blk->status |= BlkDirty;
}
- // if shared is not asserted we got the writeback in modified
- // state, if it is asserted we are in the owned state
- if (!pkt->sharedAsserted()) {
+ // if the packet does not have sharers, it is passing
+ // writable, and we got the writeback in Modified or Exclusive
+ // state, if not we are in the Owned or Shared state
+ if (!pkt->hasSharers()) {
blk->status |= BlkWritable;
}
// nothing else to do; writeback doesn't expect response
@@ -445,8 +451,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
// go to next level.
return false;
} else if ((blk != NULL) &&
- (pkt->needsExclusive() ? blk->isWritable()
- : blk->isReadable())) {
+ (pkt->needsWritable() ? blk->isWritable() : blk->isReadable())) {
// OK to satisfy access
incHitCount(pkt);
satisfyCpuSideRequest(pkt, blk);
@@ -454,7 +459,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
}
// Can't satisfy access normally... either no block (blk == NULL)
- // or have block but need exclusive & only have shared.
+ // or have block but need writable
incMissCount(pkt);
@@ -607,18 +612,32 @@ Cache::recvTimingReq(PacketPtr pkt)
promoteWholeLineWrites(pkt);
- if (pkt->memInhibitAsserted()) {
+ if (pkt->cacheResponding()) {
// a cache above us (but not where the packet came from) is
- // responding to the request
- DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
+ // responding to the request, in other words it has the line
+ // in Modified or Owned state
+ DPRINTF(Cache, "Cache above responding to %#llx (%s): "
+ "not responding\n",
pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
- // if the packet needs exclusive, and the cache that has
- // promised to respond (setting the inhibit flag) is not
- // providing exclusive (it is in O vs M state), we know that
- // there may be other shared copies in the system; go out and
- // invalidate them all
- if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
+ // if the packet needs the block to be writable, and the cache
+ // that has promised to respond (setting the cache responding
+ // flag) is not providing writable (it is in Owned rather than
+ // the Modified state), we know that there may be other Shared
+ // copies in the system; go out and invalidate them all
+ if (pkt->needsWritable() && !pkt->responderHadWritable()) {
+ // an upstream cache that had the line in Owned state
+ // (dirty, but not writable), is responding and thus
+ // transferring the dirty line from one branch of the
+ // cache hierarchy to another
+
+ // send out an express snoop and invalidate all other
+ // copies (snooping a packet that needs writable is the
+ // same as an invalidation), thus turning the Owned line
+ // into a Modified line, note that we don't invalidate the
+ // block in the current cache or any other cache on the
+ // path to memory
+
// create a downstream express snoop with cleared packet
// flags, there is no need to allocate any data as the
// packet is merely used to co-ordinate state transitions
@@ -629,11 +648,12 @@ Cache::recvTimingReq(PacketPtr pkt)
snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
// make this an instantaneous express snoop, and let the
- // other caches in the system know that the packet is
- // inhibited, because we have found the authorative copy
- // (O) that will supply the right data
+ // other caches in the system know that another cache
+ // is responding, because we have found the authoritative
+ // copy (Modified or Owned) that will supply the right
+ // data
snoop_pkt->setExpressSnoop();
- snoop_pkt->assertMemInhibit();
+ snoop_pkt->setCacheResponding();
// this express snoop travels towards the memory, and at
// every crossbar it is snooped upwards thus reaching
@@ -642,17 +662,20 @@ Cache::recvTimingReq(PacketPtr pkt)
// express snoops always succeed
assert(success);
- // main memory will delete the packet
+ // main memory will delete the snoop packet
}
- // queue for deletion, as the sending cache is still relying
- // on the packet
+ // queue for deletion, as opposed to immediate deletion, as
+ // the sending cache is still relying on the packet
pendingDelete.reset(pkt);
- // no need to take any action in this particular cache as the
- // caches along the path to memory are allowed to keep lines
- // in a shared state, and a cache above us already committed
- // to responding
+ // no need to take any action in this particular cache as an
+ // upstream cache has already committed to responding, and
+ // either the packet does not need writable (and we can let
+ // the cache that set the cache responding flag pass on the
+ // line without any need for intervention), or if the packet
+ // needs writable it is provided, or we have already sent out
+ // any express snoops in the section above
return true;
}
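Pieced together from the fragments in this hunk, the express-snoop path reads roughly as follows (a sketch assembled from the added lines above; the Packet copy constructor and port call are as used elsewhere in gem5 of this era, but treat the exact signatures as an assumption):

    if (pkt->needsWritable() && !pkt->responderHadWritable()) {
        // copy the request with cleared flags; no data is allocated
        // as the packet merely co-ordinates state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
        // instantaneous express snoop, with a cache already
        // committed to responding with the authoritative copy
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();
        // send towards memory; every crossbar snoops it upwards,
        // invalidating all other Shared copies
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        assert(success); // express snoops always succeed
    }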
@@ -872,7 +895,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// internally, and have a sufficiently weak memory
// model, this is probably unnecessary, but at some
// point it must have seemed like we needed it...
- assert(pkt->needsExclusive());
+ assert(pkt->needsWritable());
assert(!blk->isWritable());
blk->status &= ~BlkReadable;
}
@@ -900,7 +923,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// See comment in cache.hh.
PacketPtr
Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
- bool needsExclusive) const
+ bool needsWritable) const
{
bool blkValid = blk && blk->isValid();
@@ -931,9 +954,9 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
// which will clobber the owned copy.
const bool useUpgrades = true;
if (blkValid && useUpgrades) {
- // only reason to be here is that blk is shared
- // (read-only) and we need exclusive
- assert(needsExclusive);
+ // only reason to be here is that blk is read only and we need
+ // it to be writable
+ assert(needsWritable);
assert(!blk->isWritable());
cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
} else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
@@ -945,24 +968,27 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
cmd = MemCmd::SCUpgradeFailReq;
} else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
// forward as invalidate to all other caches, this gives us
- // the line in exclusive state, and invalidates all other
+ // the line in Exclusive state, and invalidates all other
// copies
cmd = MemCmd::InvalidateReq;
} else {
// block is invalid
- cmd = needsExclusive ? MemCmd::ReadExReq :
+ cmd = needsWritable ? MemCmd::ReadExReq :
(isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
}
PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
- // if there are sharers in the upper levels, pass that info downstream
- if (cpu_pkt->sharedAsserted()) {
+ // if there are upstream caches that have already marked the
+ // packet as having sharers (not passing writable), pass that info
+ // downstream
+ if (cpu_pkt->hasSharers()) {
// note that cpu_pkt may have spent a considerable time in the
// MSHR queue and that the information could possibly be out
// of date, however, there is no harm in conservatively
- // assuming the block is shared
- pkt->assertShared();
- DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
+ // assuming the block has sharers
+ pkt->setHasSharers();
+ DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
+ "size %d\n",
__func__, cpu_pkt->cmdString(), pkt->cmdString(),
pkt->getAddr(), pkt->getSize());
}
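Collapsed, the command selection in Cache::getBusPacket under the new terminology amounts to the decision tree below (a sketch; blkValid, useUpgrades, isReadOnly and the SCUpgradeFail path are as in the hunk above):

    MemCmd cmd;
    if (blkValid && useUpgrades) {
        // block is present but read only, and we need it writable
        assert(needsWritable && !blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
        // invalidate all other copies; the line comes back Exclusive
        cmd = MemCmd::InvalidateReq;
    } else {
        // block is invalid: fetch it, writable only if required
        cmd = needsWritable ? MemCmd::ReadExReq :
            (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
    if (cpu_pkt->hasSharers())
        pkt->setHasSharers(); // conservatively pass sharer info down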
@@ -992,7 +1018,7 @@ Cache::recvAtomic(PacketPtr pkt)
promoteWholeLineWrites(pkt);
- if (pkt->memInhibitAsserted()) {
+ if (pkt->cacheResponding()) {
// have to invalidate ourselves and any lower caches even if
// upper cache will be responding
if (pkt->isInvalidate()) {
@@ -1000,19 +1026,21 @@ Cache::recvAtomic(PacketPtr pkt)
if (blk && blk->isValid()) {
tags->invalidate(blk);
blk->invalidate();
- DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
+ DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
" invalidating\n",
pkt->cmdString(), pkt->getAddr(),
pkt->isSecure() ? "s" : "ns");
}
if (!last_level_cache) {
- DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
+ DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
+ " forwarding\n",
pkt->cmdString(), pkt->getAddr(),
pkt->isSecure() ? "s" : "ns");
lat += ticksToCycles(memSidePort->sendAtomic(pkt));
}
} else {
- DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
+ DPRINTF(Cache, "Other cache responding to %s on %#llx: "
+ "not responding\n",
pkt->cmdString(), pkt->getAddr());
}
@@ -1034,7 +1062,7 @@ Cache::recvAtomic(PacketPtr pkt)
if (!satisfied) {
// MISS
- PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
+ PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsWritable());
bool is_forward = (bus_pkt == NULL);
@@ -1181,11 +1209,11 @@ Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
&& pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
blk->data);
- // data we have is dirty if marked as such or if valid & ownership
- // pending due to outstanding UpgradeReq
+ // data we have is dirty if marked as such or if we have an
+ // in-service MSHR that is pending a modified line
bool have_dirty =
have_data && (blk->isDirty() ||
- (mshr && mshr->inService && mshr->isPendingDirty()));
+ (mshr && mshr->inService && mshr->isPendingModified()));
bool done = have_dirty
|| cpuSidePort->checkFunctional(pkt)
@@ -1281,9 +1309,10 @@ Cache::recvTimingResp(PacketPtr pkt)
miss_latency;
}
- // upgrade deferred targets if we got exclusive
- if (!pkt->sharedAsserted()) {
- mshr->promoteExclusive();
+ // upgrade deferred targets if the response has no sharers, and is
+ // thus passing writable
+ if (!pkt->hasSharers()) {
+ mshr->promoteWritable();
}
bool is_fill = !mshr->isForward &&
@@ -1335,9 +1364,9 @@ Cache::recvTimingResp(PacketPtr pkt)
// from above.
if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
assert(!is_error);
- // we got the block in exclusive state, so promote any
- // deferred targets if possible
- mshr->promoteExclusive();
+ // we got the block in a writable state, so promote
+ // any deferred targets if possible
+ mshr->promoteWritable();
// NB: we use the original packet here and not the response!
blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
assert(blk != NULL);
@@ -1538,8 +1567,8 @@ Cache::writebackBlk(CacheBlk *blk)
// state, mark our own block non-writeable
blk->status &= ~BlkWritable;
} else {
- // we are in the owned state, tell the receiver
- pkt->assertShared();
+ // we are in the Owned state, tell the receiver
+ pkt->setHasSharers();
}
// make sure the block is not marked dirty
@@ -1652,7 +1681,7 @@ Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
// must be an outstanding upgrade request
// on a block we're about to replace...
assert(!blk->isWritable() || blk->isDirty());
- assert(repl_mshr->needsExclusive());
+ assert(repl_mshr->needsWritable());
// too hard to replace block with transient state
// allocation failed, block not inserted
return NULL;
@@ -1753,27 +1782,30 @@ Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
// marked as writable as part of the fill, and then later marked
// dirty as part of satisfyCpuSideRequest
if (pkt->cmd == MemCmd::WriteLineReq) {
- assert(!pkt->sharedAsserted());
+ assert(!pkt->hasSharers());
// at the moment other caches do not respond to the
// invalidation requests corresponding to a whole-line write
- assert(!pkt->memInhibitAsserted());
- }
-
- if (!pkt->sharedAsserted()) {
- // we could get non-shared responses from memory (rather than
- // a cache) even in a read-only cache, note that we set this
- // bit even for a read-only cache as we use it to represent
- // the exclusive state
+ assert(!pkt->cacheResponding());
+ }
+
+ // here we deal with setting the appropriate state of the line,
+ // and we start by looking at the hasSharers flag, and ignore the
+ // cacheResponding flag (normally signalling dirty data) if the
+ // packet has sharers, thus the line is never allocated as Owned
+ // (dirty but not writable), and always ends up being either
+ // Shared, Exclusive or Modified, see Packet::setCacheResponding
+ // for more details
+ if (!pkt->hasSharers()) {
+ // we could get a writable line from memory (rather than a
+ // cache) even in a read-only cache, note that we set this bit
+ // even for a read-only cache, possibly revisit this decision
blk->status |= BlkWritable;
- // If we got this via cache-to-cache transfer (i.e., from a
- // cache that was an owner) and took away that owner's copy,
- // then we need to write it back. Normally this happens
- // anyway as a side effect of getting a copy to write it, but
- // there are cases (such as failed store conditionals or
- // compare-and-swaps) where we'll demand an exclusive copy but
- // end up not writing it.
- if (pkt->memInhibitAsserted()) {
+ // check if we got this via cache-to-cache transfer (i.e., from a
+ // cache that had the block in Modified or Owned state)
+ if (pkt->cacheResponding()) {
+ // we got the block in Modified state, and invalidated the
+ // owner's copy
blk->status |= BlkDirty;
chatty_assert(!isReadOnly, "Should never see dirty snoop response "
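Read end-to-end, the fill-state decision in this hunk reduces to the following (a restatement of the added lines, not new behaviour):

    // hasSharers is inspected first, so a fill never lands in the
    // Owned state; it ends up Shared, Exclusive or Modified
    if (!pkt->hasSharers()) {
        blk->status |= BlkWritable;   // at least Exclusive
        if (pkt->cacheResponding()) {
            // cache-to-cache transfer from a Modified/Owned copy:
            // we take over the dirty data
            blk->status |= BlkDirty;  // now Modified
        }
    }
    // otherwise the line stays clean and non-writable, i.e. Shared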
@@ -1827,7 +1859,7 @@ Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
pkt = new Packet(req_pkt, false, req_pkt->isRead());
assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
- pkt->sharedAsserted());
+ pkt->hasSharers());
pkt->makeTimingResponse();
if (pkt->isRead()) {
pkt->setDataFromBlock(blk_data, blkSize);
@@ -1835,11 +1867,11 @@ Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
// Assume we defer a response to a read from a far-away cache
// A, then later defer a ReadExcl from a cache B on the same
- // bus as us. We'll assert MemInhibit in both cases, but in
- // the latter case MemInhibit will keep the invalidation from
- // reaching cache A. This special response tells cache A that
- // it gets the block to satisfy its read, but must immediately
- // invalidate it.
+ // bus as us. We'll assert cacheResponding in both cases, but
+ // in the latter case cacheResponding will keep the
+ // invalidation from reaching cache A. This special response
+ // tells cache A that it gets the block to satisfy its read,
+ // but must immediately invalidate it.
pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
// Here we consider forward_time, paying for just forward latency and
@@ -1870,7 +1902,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// responds in atomic mode, so remember a few things about the
// original packet up front
bool invalidate = pkt->isInvalidate();
- bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
+ bool M5_VAR_USED needs_writable = pkt->needsWritable();
uint32_t snoop_delay = 0;
@@ -1878,7 +1910,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// first propagate snoop upward to see if anyone above us wants to
// handle it. save & restore packet src since it will get
// rewritten to be relative to cpu-side bus (if any)
- bool alreadyResponded = pkt->memInhibitAsserted();
+ bool alreadyResponded = pkt->cacheResponding();
if (is_timing) {
// copy the packet so that we can clear any flags before
// forwarding it upwards, we also allocate data (passing
@@ -1896,13 +1928,15 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// cache
snoop_delay += snoopPkt.headerDelay;
- if (snoopPkt.memInhibitAsserted()) {
+ if (snoopPkt.cacheResponding()) {
// cache-to-cache response from some upper cache
assert(!alreadyResponded);
- pkt->assertMemInhibit();
+ pkt->setCacheResponding();
}
- if (snoopPkt.sharedAsserted()) {
- pkt->assertShared();
+ // upstream cache has the block, or has an outstanding
+ // MSHR, pass the flag on
+ if (snoopPkt.hasSharers()) {
+ pkt->setHasSharers();
}
// If this request is a prefetch or clean evict and an upper level
// signals block present, make sure to propagate the block
@@ -1912,7 +1946,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
}
} else {
cpuSidePort->sendAtomicSnoop(pkt);
- if (!alreadyResponded && pkt->memInhibitAsserted()) {
+ if (!alreadyResponded && pkt->cacheResponding()) {
// cache-to-cache response from some upper cache:
// forward response to original requester
assert(pkt->isResponse());
@@ -1941,7 +1975,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// invalidation itself is taken care of below.
bool respond = blk->isDirty() && pkt->needsResponse() &&
pkt->cmd != MemCmd::InvalidateReq;
- bool have_exclusive = blk->isWritable();
+ bool have_writable = blk->isWritable();
// Invalidate any prefetch's from below that would strip write permissions
// MemCmd::HardPFReq is only observed by upstream caches. After missing
@@ -1955,31 +1989,37 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
}
if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
- // reading non-exclusive shared data, note that we retain
- // the block in owned state if it is dirty, with the response
- // taken care of below, and otherwhise simply downgrade to
- // shared
- assert(!needs_exclusive);
- pkt->assertShared();
+ // reading without requiring the line in a writable state,
+ // note that we retain the block as Owned if it is Modified
+ // (dirty data), with the response taken care of below, and
+ // otherwise simply downgrade from Exclusive to Shared (or
+ // remain in Shared)
+ assert(!needs_writable);
+ pkt->setHasSharers();
blk->status &= ~BlkWritable;
}
if (respond) {
// prevent anyone else from responding, cache as well as
// memory, and also prevent any memory from even seeing the
- // request (with current inhibited semantics), note that this
- // applies both to reads and writes and that for writes it
- // works thanks to the fact that we still have dirty data and
- // will write it back at a later point
- assert(!pkt->memInhibitAsserted());
- pkt->assertMemInhibit();
- if (have_exclusive) {
+ // request
+ pkt->setCacheResponding();
+ if (have_writable) {
+ // inform the cache hierarchy that this cache had the line
+ // in the Modified state so that we avoid unnecessary
+ // invalidations (see Packet::setResponderHadWritable)
+ pkt->setResponderHadWritable();
+
// in the case of an uncacheable request there is no point
- // in setting the exclusive flag, but since the recipient
- // does not care there is no harm in doing so, in any case
- // it is just a hint
- pkt->setSupplyExclusive();
+ // in setting the responderHadWritable flag, but since the
+ // recipient does not care there is no harm in doing so
+ } else {
+ // if the packet has needsWritable set we invalidate our
+ // copy below and all other copies will be invalidated
+ // through express snoops, and if needsWritable is not set
+ // we already called setHasSharers above
}
+
if (is_timing) {
doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
} else {
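Condensed, the snoop-response decision now reads (again a restatement of this hunk; respond and have_writable are computed a few lines earlier):

    if (respond) {
        // we have the dirty line, so neither any other cache nor
        // the memory may respond to this snoop
        pkt->setCacheResponding();
        if (have_writable) {
            // advertise that the line was Modified here, letting
            // the requester avoid unnecessary invalidations (see
            // Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();
        }
    }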
@@ -2090,18 +2130,19 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
}
if (wb_pkt->cmd == MemCmd::WritebackDirty) {
- assert(!pkt->memInhibitAsserted());
- pkt->assertMemInhibit();
- if (!pkt->needsExclusive()) {
- pkt->assertShared();
- // the writeback is no longer passing exclusivity (the
- // receiving cache should consider the block owned
- // rather than modified)
- wb_pkt->assertShared();
+ // we have dirty data, and so will proceed to respond
+ pkt->setCacheResponding();
+ if (!pkt->needsWritable()) {
+ // the packet should end up in the Shared state (non
+ // writable) on the completion of the fill
+ pkt->setHasSharers();
+ // similarly, the writeback is no longer passing
+ // writable (the receiving cache should consider the
+ // block Owned rather than Modified)
+ wb_pkt->setHasSharers();
} else {
- // if we're not asserting the shared line, we need to
- // invalidate our copy. we'll do that below as long as
- // the packet's invalidate flag is set...
+ // we need to invalidate our copy. we do that
+ // below.
assert(pkt->isInvalidate());
}
doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
@@ -2114,16 +2155,16 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
// The cache technically holds the block until the
// corresponding message reaches the crossbar
// below. Therefore when a snoop encounters a CleanEvict
- // or WritebackClean message we must set assertShared
- // (just like when it encounters a Writeback) to avoid the
- // snoop filter prematurely clearing the holder bit in the
- // crossbar below
- if (!pkt->needsExclusive()) {
- pkt->assertShared();
- // the writeback is no longer passing exclusivity (the
- // receiving cache should consider the block owned
- // rather than modified)
- wb_pkt->assertShared();
+ // or WritebackClean message we must call
+ // setHasSharers (just like when it encounters a
+ // Writeback) to avoid the snoop filter prematurely
+ // clearing the holder bit in the crossbar below
+ if (!pkt->needsWritable()) {
+ pkt->setHasSharers();
+ // the writeback is no longer passing writable (the
+ // receiving cache should consider the block Owned
+ // rather than Modified)
+ wb_pkt->setHasSharers();
} else {
assert(pkt->isInvalidate());
}
@@ -2280,7 +2321,7 @@ Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
snoop_pkt.senderState = NULL;
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
// Writeback/CleanEvict snoops do not generate a snoop response.
- assert(!(snoop_pkt.memInhibitAsserted()));
+ assert(!(snoop_pkt.cacheResponding()));
return snoop_pkt.isBlockCached();
} else {
cpuSidePort->sendAtomicSnoop(pkt);
@@ -2327,18 +2368,21 @@ Cache::getTimingPacket()
// the MSHRs and when it was selected to be sent or if the
// prefetch was squashed by an upper cache.
- // It is important to check memInhibitAsserted before
- // prefetchSquashed. If another cache has asserted MEM_INGIBIT, it
- // will be sending a response which will arrive at the MSHR
- // allocated ofr this request. Checking the prefetchSquash first
- // may result in the MSHR being prematurely deallocated.
-
- if (snoop_pkt.memInhibitAsserted()) {
+ // It is important to check cacheResponding before
+ // prefetchSquashed. If another cache has committed to
+ // responding, it will be sending a dirty response which will
+ // arrive at the MSHR allocated for this request. Checking the
+ // prefetchSquash first may result in the MSHR being
+ // prematurely deallocated.
+ if (snoop_pkt.cacheResponding()) {
auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
assert(r.second);
- // If we are getting a non-shared response it is dirty
- bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
- markInService(mshr, pending_dirty_resp);
+
+ // if we are getting a snoop response with no sharers it
+ // will be allocated as Modified
+ bool pending_modified_resp = !snoop_pkt.hasSharers();
+ markInService(mshr, pending_modified_resp);
+
DPRINTF(Cache, "Upward snoop of prefetch for addr"
" %#x (%s) hit\n",
tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
@@ -2364,7 +2408,7 @@ Cache::getTimingPacket()
assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
pkt = tgt_pkt;
} else {
- pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
+ pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
mshr->isForward = (pkt == NULL);
@@ -2454,10 +2498,11 @@ Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
bool success = false;
- // always let inhibited requests through, even if blocked,
- // ultimately we should check if this is an express snoop, but at
- // the moment that flag is only set in the cache itself
- if (pkt->memInhibitAsserted()) {
+ // always let packets through if an upstream cache has committed
+ // to responding, even if blocked (we should technically look at
+ // the isExpressSnoop flag, but it is set by the cache itself, and
+ // consequently we have to rely on the cacheResponding flag)
+ if (pkt->cacheResponding()) {
// do not change the current retry state
bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
assert(bypass_success);
@@ -2597,18 +2642,16 @@ Cache::CacheReqPacketQueue::sendDeferredPacket()
// it gets retried
} else {
// As part of the call to sendTimingReq the packet is
- // forwarded to all neighbouring caches (and any
- // caches above them) as a snoop. The packet is also
- // sent to any potential cache below as the
- // interconnect is not allowed to buffer the
- // packet. Thus at this point we know if any of the
- // neighbouring, or the downstream cache is
- // responding, and if so, if it is with a dirty line
- // or not.
- bool pending_dirty_resp = !pkt->sharedAsserted() &&
- pkt->memInhibitAsserted();
-
- cache.markInService(mshr, pending_dirty_resp);
+ // forwarded to all neighbouring caches (and any caches
+ // above them) as a snoop. Thus at this point we know if
+ // any of the neighbouring caches are responding, and if
+ // so, we know it is dirty, and we can determine if it is
+ // being passed as Modified, making our MSHR the ordering
+ // point
+ bool pending_modified_resp = !pkt->hasSharers() &&
+ pkt->cacheResponding();
+
+ cache.markInService(mshr, pending_modified_resp);
}
}
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 4f1763c89..a1ab66ab1 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -472,13 +472,14 @@ class Cache : public BaseCache
PacketPtr getTimingPacket();
/**
- * Marks a request as in service (sent on the bus). This can have
- * side effect since storage for no response commands is
- * deallocated once they are successfully sent. Also remember if
- * we are expecting a dirty response from another cache,
- * effectively making this MSHR the ordering point.
- */
- void markInService(MSHR *mshr, bool pending_dirty_resp);
+ * Marks a request as in service (sent downstream in the memory
+ * system). This can have side effects since storage for no
+ * response commands is deallocated once they are successfully
+ * sent. Also remember if we are expecting a Modified (dirty and
+ * writable) response from another cache, effectively making this
+ * MSHR the ordering point.
+ */
+ void markInService(MSHR *mshr, bool pending_modified_resp);
/**
* Return whether there are any outstanding misses.
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index e6a62949a..e2141a429 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -62,7 +62,7 @@
using namespace std;
MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
- pendingDirty(false),
+ pendingModified(false),
postInvalidate(false), postDowngrade(false),
queue(NULL), order(0), blkAddr(0),
blkSize(0), isSecure(false), inService(false),
@@ -73,7 +73,7 @@ MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
MSHR::TargetList::TargetList()
- : needsExclusive(false), hasUpgrade(false)
+ : needsWritable(false), hasUpgrade(false)
{}
@@ -82,8 +82,8 @@ MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
Counter order, Target::Source source, bool markPending)
{
if (source != Target::FromSnoop) {
- if (pkt->needsExclusive()) {
- needsExclusive = true;
+ if (pkt->needsWritable()) {
+ needsWritable = true;
}
// StoreCondReq is effectively an upgrade if it's in an MSHR
@@ -238,7 +238,7 @@ MSHR::clearDownstreamPending()
}
bool
-MSHR::markInService(bool pending_dirty_resp)
+MSHR::markInService(bool pending_modified_resp)
{
assert(!inService);
if (isForwardNoResponse()) {
@@ -250,7 +250,7 @@ MSHR::markInService(bool pending_dirty_resp)
}
inService = true;
- pendingDirty = targets.needsExclusive || pending_dirty_resp;
+ pendingModified = targets.needsWritable || pending_modified_resp;
postInvalidate = postDowngrade = false;
if (!downstreamPending) {
@@ -297,14 +297,14 @@ MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
// - there are other targets already deferred
// - there's a pending invalidate to be applied after the response
// comes back (but before this target is processed)
- // - this target requires an exclusive block and either we're not
- // getting an exclusive block back or we have already snooped
- // another read request that will downgrade our exclusive block
- // to shared
+ // - this target requires a writable block and either we're not
+ // getting a writable block back or we have already snooped
+ // another read request that will downgrade our writable block
+ // to non-writable (Shared or Owned)
if (inService &&
(!deferredTargets.empty() || hasPostInvalidate() ||
- (pkt->needsExclusive() &&
- (!isPendingDirty() || hasPostDowngrade() || isForward)))) {
+ (pkt->needsWritable() &&
+ (!isPendingModified() || hasPostDowngrade() || isForward)))) {
// need to put on deferred list
if (hasPostInvalidate())
replaceUpgrade(pkt);
@@ -324,11 +324,11 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
- // when we snoop packets the needsExclusive and isInvalidate flags
+ // when we snoop packets the needsWritable and isInvalidate flags
// should always be the same, however, this assumes that we never
// snoop writes as they are currently not marked as invalidations
- panic_if(pkt->needsExclusive() != pkt->isInvalidate(),
- "%s got snoop %s to addr %#llx where needsExclusive, "
+ panic_if(pkt->needsWritable() != pkt->isInvalidate(),
+ "%s got snoop %s to addr %#llx where needsWritable, "
"does not match isInvalidate", name(), pkt->cmdString(),
pkt->getAddr());
@@ -346,7 +346,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// That is, even though the upper-level cache got out on its
// local bus first, some other invalidating transaction
// reached the global bus before the upgrade did.
- if (pkt->needsExclusive()) {
+ if (pkt->needsWritable()) {
targets.replaceUpgrades();
deferredTargets.replaceUpgrades();
}
@@ -356,7 +356,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// From here on down, the request issued by this MSHR logically
// precedes the request we're snooping.
- if (pkt->needsExclusive()) {
+ if (pkt->needsWritable()) {
// snooped request still precedes the re-request we'll have to
// issue for deferred targets, if any...
deferredTargets.replaceUpgrades();
@@ -369,17 +369,18 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
return true;
}
- if (isPendingDirty() || pkt->isInvalidate()) {
+ if (isPendingModified() || pkt->isInvalidate()) {
// We need to save and replay the packet in two cases:
- // 1. We're awaiting an exclusive copy, so ownership is pending,
- // and we need to deal with the snoop after we receive data.
+ // 1. We're awaiting a writable copy (Modified or Exclusive),
+ // so this MSHR is the ordering point, and we need to respond
+ // after we receive data.
// 2. It's an invalidation (e.g., UpgradeReq), and we need
// to forward the snoop up the hierarchy after the current
// transaction completes.
// Start by determining if we will eventually respond or not,
// matching the conditions checked in Cache::handleSnoop
- bool will_respond = isPendingDirty() && pkt->needsResponse() &&
+ bool will_respond = isPendingModified() && pkt->needsResponse() &&
pkt->cmd != MemCmd::InvalidateReq;
// The packet we are snooping may be deleted by the time we
@@ -395,29 +396,39 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
new Packet(new Request(*pkt->req), pkt->cmd);
- if (isPendingDirty()) {
- // The new packet will need to get the response from the
- // MSHR already queued up here
- pkt->assertMemInhibit();
+ if (isPendingModified()) {
+ // we are the ordering point, and will consequently
+ // respond, and depending on whether the packet
+ // needsWritable or not we either pass a Shared line or a
+ // Modified line
+ pkt->setCacheResponding();
+
+ // inform the cache hierarchy that this cache had the line
+ // in the Modified state, even if the response is passed
+ // as Shared (and thus non-writable)
+ pkt->setResponderHadWritable();
+
// in the case of an uncacheable request there is no need
- // to set the exclusive flag, but since the recipient does
- // not care there is no harm in doing so
- pkt->setSupplyExclusive();
+ // to set the responderHadWritable flag, but since the
+ // recipient does not care there is no harm in doing so
}
targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
- downstreamPending && targets.needsExclusive);
+ downstreamPending && targets.needsWritable);
- if (pkt->needsExclusive()) {
+ if (pkt->needsWritable()) {
// This transaction will take away our pending copy
postInvalidate = true;
}
}
- if (!pkt->needsExclusive() && !pkt->req->isUncacheable()) {
+ if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
// This transaction will get a read-shared copy, downgrading
- // our copy if we had an exclusive one
+ // our copy if we had a writable one
postDowngrade = true;
- pkt->assertShared();
+ // make sure that any downstream cache does not respond with a
+ // writable (and dirty) copy even if it has one, unless it was
+ // explicitly asked for one
+ pkt->setHasSharers();
}
return true;
@@ -446,20 +457,19 @@ MSHR::promoteDeferredTargets()
void
-MSHR::promoteExclusive()
+MSHR::promoteWritable()
{
- if (deferredTargets.needsExclusive &&
+ if (deferredTargets.needsWritable &&
!(hasPostInvalidate() || hasPostDowngrade())) {
- // We got an exclusive response, but we have deferred targets
- // which are waiting to request an exclusive copy (not because
+ // We got a writable response, but we have deferred targets
+ // which are waiting to request a writable copy (not because
// of a pending invalidate). This can happen if the original
- // request was for a read-only (non-exclusive) block, but we
- // got an exclusive copy anyway because of the E part of the
- // MOESI/MESI protocol. Since we got the exclusive copy
- // there's no need to defer the targets, so move them up to
- // the regular target list.
- assert(!targets.needsExclusive);
- targets.needsExclusive = true;
+ // request was for a read-only block, but we got a writable
+ // response anyway. Since we got the writable copy there's no
+ // need to defer the targets, so move them up to the regular
+ // target list.
+ assert(!targets.needsWritable);
+ targets.needsWritable = true;
// if any of the deferred targets were upper-level cache
// requests marked downstreamPending, need to clear that
assert(!downstreamPending); // not pending here anymore
@@ -496,7 +506,7 @@ MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
isForward ? "Forward" : "",
allocOnFill ? "AllocOnFill" : "",
isForwardNoResponse() ? "ForwNoResp" : "",
- needsExclusive() ? "Excl" : "",
+ needsWritable() ? "Wrtbl" : "",
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "",
downstreamPending ? "DwnPend" : "",
diff --git a/src/mem/cache/mshr.hh b/src/mem/cache/mshr.hh
index 0abb70e29..82a674672 100644
--- a/src/mem/cache/mshr.hh
+++ b/src/mem/cache/mshr.hh
@@ -80,8 +80,31 @@ class MSHR : public Packet::SenderState, public Printable
/** Flag set by downstream caches */
bool downstreamPending;
- /** Will we have a dirty copy after this request? */
- bool pendingDirty;
+ /**
+ * Here we use one flag to track both if:
+ *
+ * 1. We are going to become owner or not, i.e., we will get the
+ * block in an ownership state (Owned or Modified) with BlkDirty
+ * set. This determines whether or not we are going to become the
+ * responder and ordering point for future requests that we snoop.
+ *
+ * 2. We know that we are going to get a writable block, i.e. we
+ * will get the block in writable state (Exclusive or Modified
+ * state) with BlkWritable set. That determines whether additional
+ * targets with needsWritable set will be able to be satisfied, or
+ * if not should be put on the deferred list to possibly wait for
+ * another request that does give us writable access.
+ *
+ * Condition 2 is actually just a shortcut that saves us from
+ * possibly building a deferred target list and calling
+ * promoteWritable() every time we get a writable block. Condition
+ * 1, tracking ownership, is what is important. However, we never
+ * receive ownership without marking the block dirty, and
+ * consequently use pendingModified to track both ownership and
+ * writability rather than having separate pendingDirty and
+ * pendingWritable flags.
+ */
+ bool pendingModified;
/** Did we snoop an invalidate while waiting for data? */
bool postInvalidate;
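The single-flag shortcut described above is visible at the point where the MSHR goes into service (repeated from mshr.cc earlier in this commit):

    // one flag covers both pending ownership and pending
    // writability: set if we asked for a writable copy ourselves,
    // or if another cache committed to a Modified response
    pendingModified = targets.needsWritable || pending_modified_resp;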
@@ -118,12 +141,12 @@ class MSHR : public Packet::SenderState, public Printable
class TargetList : public std::list<Target> {
public:
- bool needsExclusive;
+ bool needsWritable;
bool hasUpgrade;
TargetList();
- void resetFlags() { needsExclusive = hasUpgrade = false; }
- bool isReset() const { return !needsExclusive && !hasUpgrade; }
+ void resetFlags() { needsWritable = hasUpgrade = false; }
+ bool isReset() const { return !needsWritable && !hasUpgrade; }
void add(PacketPtr pkt, Tick readyTime, Counter order,
Target::Source source, bool markPending);
void replaceUpgrades();
@@ -169,11 +192,11 @@ class MSHR : public Packet::SenderState, public Printable
* flags are accessed improperly.
*/
- /** True if we need to get an exclusive copy of the block. */
- bool needsExclusive() const { return targets.needsExclusive; }
+ /** True if we need to get a writable copy of the block. */
+ bool needsWritable() const { return targets.needsWritable; }
- bool isPendingDirty() const {
- assert(inService); return pendingDirty;
+ bool isPendingModified() const {
+ assert(inService); return pendingModified;
}
bool hasPostInvalidate() const {
@@ -223,7 +246,7 @@ class MSHR : public Packet::SenderState, public Printable
void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter _order, bool alloc_on_fill);
- bool markInService(bool pending_dirty_resp);
+ bool markInService(bool pending_modified_resp);
void clearDownstreamPending();
@@ -284,7 +307,7 @@ class MSHR : public Packet::SenderState, public Printable
bool promoteDeferredTargets();
- void promoteExclusive();
+ void promoteWritable();
bool checkFunctional(PacketPtr pkt);
diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index 6efd291b8..ed6769349 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -200,9 +200,9 @@ MSHRQueue::moveToFront(MSHR *mshr)
}
void
-MSHRQueue::markInService(MSHR *mshr, bool pending_dirty_resp)
+MSHRQueue::markInService(MSHR *mshr, bool pending_modified_resp)
{
- if (mshr->markInService(pending_dirty_resp)) {
+ if (mshr->markInService(pending_modified_resp)) {
deallocate(mshr);
} else {
readyList.erase(mshr->readyIter);
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index a2ad6d020..45aa1c15f 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -188,10 +188,10 @@ class MSHRQueue : public Drainable
* readyList or deallocates the MSHR if it does not expect a response.
*
* @param mshr The MSHR to mark in service.
- * @param pending_dirty_resp Whether we expect a dirty response
- * from another cache
+ * @param pending_modified_resp Whether we expect a modified response
+ * from another cache
*/
- void markInService(MSHR *mshr, bool pending_dirty_resp);
+ void markInService(MSHR *mshr, bool pending_modified_resp);
/**
* Mark an in service entry as pending, used to resend a request.