From 6babda7123be5e69db137e77589d88c768c19345 Mon Sep 17 00:00:00 2001
From: Steve Reinhardt
Date: Sat, 30 Jun 2007 13:34:16 -0700
Subject: Fix up a few statistics problems.

Stats pretty much line up with old code, except:
- bug in old code included L1 latency in L2 miss time, making it too high
- UniCoherence did cache-to-cache transfers even from non-owner caches,
  so occasionally the icache would get a block from the dcache not the L2
- L2 can now receive ReadExReq from L1 since L1s have coherence

--HG--
extra : convert_revision : 5052c1a1767b5a662f30a88f16012165a73b791c
---
 src/mem/cache/miss/mshr.cc       | 20 ++++++++++----------
 src/mem/cache/miss/mshr.hh       | 10 ++++++----
 src/mem/cache/miss/mshr_queue.cc |  4 ++--
 src/mem/cache/miss/mshr_queue.hh |  6 +++---
 4 files changed, 21 insertions(+), 19 deletions(-)

(limited to 'src/mem/cache/miss')

diff --git a/src/mem/cache/miss/mshr.cc b/src/mem/cache/miss/mshr.cc
index 63b3cacc2..5d5e63f90 100644
--- a/src/mem/cache/miss/mshr.cc
+++ b/src/mem/cache/miss/mshr.cc
@@ -56,11 +56,11 @@ MSHR::MSHR()
 
 void
 MSHR::allocate(Addr _addr, int _size, PacketPtr target,
-               Tick when, Counter _order)
+               Tick whenReady, Counter _order)
 {
     addr = _addr;
     size = _size;
-    readyTick = when;
+    readyTime = whenReady;
     order = _order;
     assert(target);
     isCacheFill = false;
@@ -71,7 +71,7 @@ MSHR::allocate(Addr _addr, int _size, PacketPtr target,
     ntargets = 1;
     // Don't know of a case where we would allocate a new MSHR for a
     // snoop (mem-side request), so set cpuSide to true here.
-    targets.push_back(Target(target, when, _order, true));
+    targets.push_back(Target(target, whenReady, _order, true));
     assert(deferredTargets.empty());
     deferredNeedsExclusive = false;
     pendingInvalidate = false;
@@ -94,33 +94,33 @@ MSHR::deallocate()
  * Adds a target to an MSHR
  */
 void
-MSHR::allocateTarget(PacketPtr target, Tick when, Counter _order)
+MSHR::allocateTarget(PacketPtr target, Tick whenReady, Counter _order)
 {
     if (inService) {
         if (!deferredTargets.empty() || pendingInvalidate ||
             (!needsExclusive && target->needsExclusive())) {
             // need to put on deferred list
-            deferredTargets.push_back(Target(target, when, _order, true));
+            deferredTargets.push_back(Target(target, whenReady, _order, true));
             if (target->needsExclusive()) {
                 deferredNeedsExclusive = true;
             }
         } else {
             // still OK to append to outstanding request
-            targets.push_back(Target(target, when, _order, true));
+            targets.push_back(Target(target, whenReady, _order, true));
         }
     } else {
         if (target->needsExclusive()) {
             needsExclusive = true;
         }
 
-        targets.push_back(Target(target, when, _order, true));
+        targets.push_back(Target(target, whenReady, _order, true));
     }
 
     ++ntargets;
 }
 
 void
-MSHR::allocateSnoopTarget(PacketPtr pkt, Tick when, Counter _order)
+MSHR::allocateSnoopTarget(PacketPtr pkt, Tick whenReady, Counter _order)
 {
     assert(inService); // don't bother to call otherwise
 
@@ -137,7 +137,7 @@ MSHR::allocateSnoopTarget(PacketPtr pkt, Tick when, Counter _order)
     if (needsExclusive || pkt->needsExclusive()) {
         // actual target device (typ. PhysicalMemory) will delete the
         // packet on reception, so we need to save a copy here
-        targets.push_back(Target(new Packet(pkt), when, _order, false));
+        targets.push_back(Target(new Packet(pkt), whenReady, _order, false));
         ++ntargets;
 
         if (needsExclusive) {
@@ -177,7 +177,7 @@ MSHR::promoteDeferredTargets()
     pendingShared = false;
     deferredNeedsExclusive = false;
     order = targets.front().order;
-    readyTick = std::max(curTick, targets.front().time);
+    readyTime = std::max(curTick, targets.front().readyTime);
 
     return true;
 }
diff --git a/src/mem/cache/miss/mshr.hh b/src/mem/cache/miss/mshr.hh
index 4db7b1cfe..293f290b8 100644
--- a/src/mem/cache/miss/mshr.hh
+++ b/src/mem/cache/miss/mshr.hh
@@ -54,15 +54,17 @@ class MSHR : public Packet::SenderState
 
     class Target {
       public:
-        Tick time;      //!< Time when request was received (for stats)
+        Tick recvTime;  //!< Time when request was received (for stats)
+        Tick readyTime; //!< Time when request is ready to be serviced
         Counter order;  //!< Global order (for memory consistency mgmt)
         PacketPtr pkt;  //!< Pending request packet.
        bool cpuSide;   //!< Did request come from cpu side or mem side?
 
         bool isCpuSide() { return cpuSide; }
 
-        Target(PacketPtr _pkt, Tick _time, Counter _order, bool _cpuSide)
-            : time(_time), order(_order), pkt(_pkt), cpuSide(_cpuSide)
+        Target(PacketPtr _pkt, Tick _readyTime, Counter _order, bool _cpuSide)
+            : recvTime(curTick), readyTime(_readyTime), order(_order),
+              pkt(_pkt), cpuSide(_cpuSide)
         {}
     };
 
@@ -81,7 +83,7 @@ class MSHR : public Packet::SenderState
     MSHRQueue *queue;
 
     /** Cycle when ready to issue */
-    Tick readyTick;
+    Tick readyTime;
 
     /** Order number assigned by the miss queue. */
     Counter order;
diff --git a/src/mem/cache/miss/mshr_queue.cc b/src/mem/cache/miss/mshr_queue.cc
index 18184bd20..56ec62a7d 100644
--- a/src/mem/cache/miss/mshr_queue.cc
+++ b/src/mem/cache/miss/mshr_queue.cc
@@ -111,14 +111,14 @@ MSHRQueue::findPending(Addr addr, int size) const
 MSHR::Iterator
 MSHRQueue::addToReadyList(MSHR *mshr)
 {
-    if (readyList.empty() || readyList.back()->readyTick <= mshr->readyTick) {
+    if (readyList.empty() || readyList.back()->readyTime <= mshr->readyTime) {
         return readyList.insert(readyList.end(), mshr);
     }
 
     MSHR::Iterator i = readyList.begin();
     MSHR::Iterator end = readyList.end();
     for (; i != end; ++i) {
-        if ((*i)->readyTick > mshr->readyTick) {
+        if ((*i)->readyTime > mshr->readyTime) {
             return readyList.insert(i, mshr);
         }
     }
diff --git a/src/mem/cache/miss/mshr_queue.hh b/src/mem/cache/miss/mshr_queue.hh
index fd61dec8b..1f1d59e98 100644
--- a/src/mem/cache/miss/mshr_queue.hh
+++ b/src/mem/cache/miss/mshr_queue.hh
@@ -193,15 +193,15 @@ class MSHRQueue
      */
     MSHR *getNextMSHR() const
     {
-        if (readyList.empty() || readyList.front()->readyTick > curTick) {
+        if (readyList.empty() || readyList.front()->readyTime > curTick) {
             return NULL;
         }
         return readyList.front();
     }
 
-    Tick nextMSHRReadyTick() const
+    Tick nextMSHRReadyTime() const
     {
-        return readyList.empty() ? MaxTick : readyList.front()->readyTick;
+        return readyList.empty() ? MaxTick : readyList.front()->readyTime;
     }
 };
--
cgit v1.2.3