author | Steve Reinhardt <stever@eecs.umich.edu> | 2007-07-21 18:18:42 -0700
---|---|---
committer | Steve Reinhardt <stever@eecs.umich.edu> | 2007-07-21 18:18:42 -0700
commit | 92ce2b59743c8cace420147e276d7376a4b905f1 (patch) |
tree | 40068a62ffd952f841ef461933e1faa86493391a /src/mem/cache |
parent | 91178600947e174041f46f54e4241cedd01bbb34 (diff) |
download | gem5-92ce2b59743c8cace420147e276d7376a4b905f1.tar.xz |
Deal with invalidations intersecting outstanding upgrades.
If the invalidation beats the upgrade at a lower level,
then the upgrade must be converted to a read exclusive
"in the field".
Restructure target list & deferred target list to
factor out some common code.
--HG--
extra : convert_revision : 7bab4482dd6c48efdb619610f0d3778c60ff777a
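
The conversion the message describes is easiest to see in isolation. Below is a condensed, self-contained sketch of the mechanism, using simplified stand-in types (the `Packet`, `MemCmd`, and `Target` here are placeholders, not the real gem5 classes); it mirrors the new `MSHR::TargetList::replaceUpgrades()` added in the patch below.

```cpp
#include <cstdio>
#include <list>

// Stand-in command set; the real gem5 MemCmd is a richer class.
enum class MemCmd { ReadReq, UpgradeReq, ReadExReq };

struct Packet {
    MemCmd cmd;
    bool needsExclusive() const {
        return cmd == MemCmd::UpgradeReq || cmd == MemCmd::ReadExReq;
    }
};

struct Target { Packet *pkt; };

// Simplified version of the patch's MSHR::TargetList: the list tracks
// whether any of its targets need an exclusive copy and whether any of
// them is a buffered upgrade.
struct TargetList : std::list<Target> {
    bool needsExclusive = false;
    bool hasUpgrade = false;

    void add(Packet *pkt) {
        if (pkt->needsExclusive())
            needsExclusive = true;
        if (pkt->cmd == MemCmd::UpgradeReq)
            hasUpgrade = true;
        push_back(Target{pkt});
    }

    // The core of the fix: if an invalidating snoop has raced ahead of a
    // buffered UpgradeReq, the block it would have upgraded is gone, so
    // the upgrade is rewritten "in the field" into a full ReadExReq.
    void replaceUpgrades() {
        if (!hasUpgrade)
            return;
        for (Target &t : *this) {
            if (t.pkt->cmd == MemCmd::UpgradeReq) {
                t.pkt->cmd = MemCmd::ReadExReq;
                std::printf("Replacing UpgradeReq with ReadExReq\n");
            }
        }
        hasUpgrade = false;
    }
};

int main() {
    Packet upgrade{MemCmd::UpgradeReq};
    TargetList targets;
    targets.add(&upgrade);

    // An invalidation beat the upgrade onto the bus below us, so the
    // buffered upgrade must fetch fresh data instead of just upgrading.
    targets.replaceUpgrades();
    return 0;
}
```

In the patch itself, `MSHR::handleSnoop()` calls `replaceUpgrades()` on both the active and the deferred target lists whenever an exclusive (invalidating) snoop is seen.
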
Diffstat (limited to 'src/mem/cache')
-rw-r--r-- | src/mem/cache/cache_impl.hh | 4
-rw-r--r-- | src/mem/cache/miss/mshr.cc | 153
-rw-r--r-- | src/mem/cache/miss/mshr.hh | 35
3 files changed, 139 insertions, 53 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index b78360d4a..9fb5cdbde 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -836,7 +836,7 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
         // must be an outstanding upgrade request on block
         // we're about to replace...
         assert(!blk->isWritable());
-        assert(repl_mshr->needsExclusive);
+        assert(repl_mshr->needsExclusive());
         // too hard to replace block with transient state;
         // just use temporary storage to complete the current
         // request and then get rid of it
@@ -1177,7 +1177,7 @@ Cache<TagStore>::getTimingPacket()
         pkt = tgt_pkt;
     } else {
         BlkType *blk = tags->findBlock(mshr->addr);
-        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive);
+        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
 
         mshr->isCacheFill = (pkt != NULL);
diff --git a/src/mem/cache/miss/mshr.cc b/src/mem/cache/miss/mshr.cc
index 7ba3789fe..856819c10 100644
--- a/src/mem/cache/miss/mshr.cc
+++ b/src/mem/cache/miss/mshr.cc
@@ -52,8 +52,51 @@ MSHR::MSHR()
     inService = false;
     ntargets = 0;
     threadNum = -1;
+    targets = new TargetList();
+    deferredTargets = new TargetList();
 }
+
+
+MSHR::TargetList::TargetList()
+    : needsExclusive(false), hasUpgrade(false)
+{}
+
+
+inline void
+MSHR::TargetList::add(PacketPtr pkt, Tick readyTime, Counter order, bool cpuSide)
+{
+    if (cpuSide) {
+        if (pkt->needsExclusive()) {
+            needsExclusive = true;
+        }
+
+        if (pkt->cmd == MemCmd::UpgradeReq) {
+            hasUpgrade = true;
+        }
+    }
+
+    push_back(Target(pkt, readyTime, order, cpuSide));
+}
+
+
+void
+MSHR::TargetList::replaceUpgrades()
+{
+    if (!hasUpgrade)
+        return;
+
+    Iterator end_i = end();
+    for (Iterator i = begin(); i != end_i; ++i) {
+        if (i->pkt->cmd == MemCmd::UpgradeReq) {
+            i->pkt->cmd = MemCmd::ReadExReq;
+            DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
+        }
+    }
+
+    hasUpgrade = false;
+}
+
+
 void
 MSHR::allocate(Addr _addr, int _size, PacketPtr target,
                Tick whenReady, Counter _order)
@@ -64,16 +107,15 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target,
     order = _order;
     assert(target);
     isCacheFill = false;
-    needsExclusive = target->needsExclusive();
     _isUncacheable = target->req->isUncacheable();
     inService = false;
     threadNum = 0;
     ntargets = 1;
     // Don't know of a case where we would allocate a new MSHR for a
     // snoop (mem-side request), so set cpuSide to true here.
-    targets.push_back(Target(target, whenReady, _order, true));
-    assert(deferredTargets.empty());
-    deferredNeedsExclusive = false;
+    assert(targets->isReset());
+    targets->add(target, whenReady, _order, true);
+    assert(deferredTargets->isReset());
     pendingInvalidate = false;
     pendingShared = false;
     data = NULL;
@@ -82,8 +124,9 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target,
 void
 MSHR::deallocate()
 {
-    assert(targets.empty());
-    assert(deferredTargets.empty());
+    assert(targets->empty());
+    targets->resetFlags();
+    assert(deferredTargets->isReset());
     assert(ntargets == 0);
     inService = false;
     //allocIter = NULL;
@@ -94,26 +137,25 @@ MSHR::deallocate()
  * Adds a target to an MSHR
  */
 void
-MSHR::allocateTarget(PacketPtr target, Tick whenReady, Counter _order)
+MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
 {
-    if (inService) {
-        if (!deferredTargets.empty() || pendingInvalidate ||
-            (!needsExclusive && target->needsExclusive())) {
-            // need to put on deferred list
-            deferredTargets.push_back(Target(target, whenReady, _order, true));
-            if (target->needsExclusive()) {
-                deferredNeedsExclusive = true;
-            }
-        } else {
-            // still OK to append to outstanding request
-            targets.push_back(Target(target, whenReady, _order, true));
-        }
+    // if there's a request already in service for this MSHR, we will
+    // have to defer the new target until after the response if any of
+    // the following are true:
+    // - there are other targets already deferred
+    // - there's a pending invalidate to be applied after the response
+    //   comes back (but before this target is processed)
+    // - the outstanding request is for a non-exclusive block and this
+    //   target requires an exclusive block
+    if (inService &&
+        (!deferredTargets->empty() || pendingInvalidate ||
+         (!targets->needsExclusive && pkt->needsExclusive()))) {
+        // need to put on deferred list
+        deferredTargets->add(pkt, whenReady, _order, true);
     } else {
-        if (target->needsExclusive()) {
-            needsExclusive = true;
-        }
-
-        targets.push_back(Target(target, whenReady, _order, true));
+        // no request outstanding, or still OK to append to
+        // outstanding request
+        targets->add(pkt, whenReady, _order, true);
     }
 
     ++ntargets;
@@ -123,22 +165,50 @@ bool
 MSHR::handleSnoop(PacketPtr pkt, Counter _order)
 {
     if (!inService || (pkt->isExpressSnoop() && !pkt->isDeferredSnoop())) {
+        // Request has not been issued yet, or it's been issued
+        // locally but is buffered unissued at some downstream cache
+        // which is forwarding us this snoop.  Either way, the packet
+        // we're snooping logically precedes this MSHR's request, so
+        // the snoop has no impact on the MSHR, but must be processed
+        // in the standard way by the cache.  The only exception is
+        // that if we're an L2+ cache buffering an UpgradeReq from a
+        // higher-level cache, and the snoop is invalidating, then our
+        // buffered upgrades must be converted to read exclusives,
+        // since the upper-level cache no longer has a valid copy.
+        // That is, even though the upper-level cache got out on its
+        // local bus first, some other invalidating transaction
+        // reached the global bus before the upgrade did.
+        if (pkt->needsExclusive()) {
+            targets->replaceUpgrades();
+            deferredTargets->replaceUpgrades();
+        }
+
         return false;
     }
 
+    // From here on down, the request issued by this MSHR logically
+    // precedes the request we're snooping.
+
+    if (pkt->needsExclusive()) {
+        // snooped request still precedes the re-request we'll have to
+        // issue for deferred targets, if any...
+        deferredTargets->replaceUpgrades();
+    }
+
     if (pendingInvalidate) {
         // a prior snoop has already appended an invalidation, so
-        // logically we don't have the block anymore...
+        // logically we don't have the block anymore; no need for
+        // further snooping.
         return true;
     }
 
-    if (needsExclusive || pkt->needsExclusive()) {
+    if (targets->needsExclusive || pkt->needsExclusive()) {
         // actual target device (typ. PhysicalMemory) will delete the
         // packet on reception, so we need to save a copy here
-        targets.push_back(Target(new Packet(pkt), curTick, _order, false));
+        targets->add(new Packet(pkt), curTick, _order, false);
         ++ntargets;
 
-        if (needsExclusive) {
+        if (targets->needsExclusive) {
             // We're awaiting an exclusive copy, so ownership is pending.
             // It's up to us to respond once the data arrives.
             pkt->assertMemInhibit();
@@ -163,21 +233,25 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
 bool
 MSHR::promoteDeferredTargets()
 {
-    if (deferredTargets.empty()) {
+    assert(targets->empty());
+    if (deferredTargets->empty()) {
         return false;
     }
 
-    assert(targets.empty());
+    // swap targets & deferredTargets lists
+    TargetList *tmp = targets;
     targets = deferredTargets;
-    deferredTargets.clear();
-    assert(targets.size() == ntargets);
+    deferredTargets = tmp;
+
+    assert(targets->size() == ntargets);
+
+    // clear deferredTargets flags
+    deferredTargets->resetFlags();
 
-    needsExclusive = deferredNeedsExclusive;
     pendingInvalidate = false;
     pendingShared = false;
-    deferredNeedsExclusive = false;
-    order = targets.front().order;
-    readyTime = std::max(curTick, targets.front().readyTime);
+    order = targets->front().order;
+    readyTime = std::max(curTick, targets->front().readyTime);
 
     return true;
 }
@@ -202,16 +276,17 @@ MSHR::dump()
              "Addr: %x ntargets %d\n"
              "Targets:\n",
              inService, threadNum, addr, ntargets);
-
-    TargetListIterator tar_it = targets.begin();
+#if 0
+    TargetListIterator tar_it = targets->begin();
     for (int i = 0; i < ntargets; i++) {
-        assert(tar_it != targets.end());
+        assert(tar_it != targets->end());
         ccprintf(cerr, "\t%d: Addr: %x cmd: %s\n",
                  i, tar_it->pkt->getAddr(), tar_it->pkt->cmdString());
         tar_it++;
     }
+#endif
     ccprintf(cerr, "\n");
 }
diff --git a/src/mem/cache/miss/mshr.hh b/src/mem/cache/miss/mshr.hh
index 9c6a8cf33..06ef6e113 100644
--- a/src/mem/cache/miss/mshr.hh
+++ b/src/mem/cache/miss/mshr.hh
@@ -68,10 +68,21 @@ class MSHR : public Packet::SenderState
         {}
     };
 
-    /** Defines the Data structure of the MSHR targetlist. */
-    typedef std::list<Target> TargetList;
-    /** Target list iterator. */
-    typedef std::list<Target>::iterator TargetListIterator;
+    class TargetList : public std::list<Target> {
+        /** Target list iterator. */
+        typedef std::list<Target>::iterator Iterator;
+
+      public:
+        bool needsExclusive;
+        bool hasUpgrade;
+
+        TargetList();
+        void resetFlags() { needsExclusive = hasUpgrade = false; }
+        bool isReset() { return !needsExclusive && !hasUpgrade; }
+        void add(PacketPtr pkt, Tick readyTime, Counter order, bool cpuSide);
+        void replaceUpgrades();
+    };
+
     /** A list of MSHRs. */
     typedef std::list<MSHR *> List;
     /** MSHR list iterator. */
@@ -99,13 +110,13 @@ class MSHR : public Packet::SenderState
     /** True if we will be putting the returned block in the cache */
     bool isCacheFill;
 
+    /** True if we need to get an exclusive copy of the block. */
-    bool needsExclusive;
+    bool needsExclusive() { return targets->needsExclusive; }
 
     /** True if the request is uncacheable */
     bool _isUncacheable;
 
-    bool deferredNeedsExclusive;
     bool pendingInvalidate;
     bool pendingShared;
@@ -133,9 +144,9 @@ class MSHR : public Packet::SenderState
 
   private:
     /** List of all requests that match the address */
-    TargetList targets;
+    TargetList *targets;
 
-    TargetList deferredTargets;
+    TargetList *deferredTargets;
 
   public:
@@ -179,19 +190,19 @@ public:
      * Returns a pointer to the target list.
      * @return a pointer to the target list.
      */
-    TargetList* getTargetList() { return &targets; }
+    TargetList *getTargetList() { return targets; }
 
     /**
     * Returns true if there are targets left.
     * @return true if there are targets
     */
-    bool hasTargets() { return !targets.empty(); }
+    bool hasTargets() { return !targets->empty(); }
 
    /**
     * Returns a reference to the first target.
    * @return A pointer to the first target.
    */
-    Target *getTarget() { assert(hasTargets()); return &targets.front(); }
+    Target *getTarget() { assert(hasTargets()); return &targets->front(); }
 
    /**
     * Pop first target.
@@ -199,7 +210,7 @@ public:
    void popTarget()
    {
        --ntargets;
-        targets.pop_front();
+        targets->pop_front();
    }
 
    bool isSimpleForward()
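
The "restructure target list & deferred target list" half of the change shows up most clearly in `promoteDeferredTargets()`: with both lists held as `TargetList` pointers, promotion becomes a pointer swap plus a flag reset. A minimal sketch of that shape, again with simplified stand-in types rather than the real gem5 interfaces:

```cpp
#include <cassert>
#include <list>
#include <utility>

struct Target { int order; };

// Simplified stand-in for the patch's MSHR::TargetList: the list carries
// its own summary flags instead of the MSHR tracking them separately.
struct TargetList : std::list<Target> {
    bool needsExclusive = false;
    bool hasUpgrade = false;
    void resetFlags() { needsExclusive = hasUpgrade = false; }
};

struct MiniMSHR {
    TargetList listA, listB;              // backing storage for the two lists
    TargetList *targets = &listA;         // targets of the in-service request
    TargetList *deferredTargets = &listB; // targets deferred until it completes

    // Mirrors the shape of MSHR::promoteDeferredTargets() in the patch: once
    // the current targets have been drained, any deferred targets (and their
    // flags) become the active list by swapping the two pointers.
    bool promoteDeferredTargets() {
        assert(targets->empty());
        if (deferredTargets->empty())
            return false;
        std::swap(targets, deferredTargets);
        deferredTargets->resetFlags();
        return true;
    }
};

int main() {
    MiniMSHR mshr;
    mshr.deferredTargets->push_back(Target{1});
    mshr.deferredTargets->needsExclusive = true;

    // After the outstanding request finishes, the deferred list takes over,
    // carrying its needsExclusive flag with it.
    bool promoted = mshr.promoteDeferredTargets();
    assert(promoted && mshr.targets->needsExclusive);
    return 0;
}
```
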