author      Ron Dreslinski <rdreslin@umich.edu>   2006-11-10 22:45:50 -0500
committer   Ron Dreslinski <rdreslin@umich.edu>   2006-11-10 22:45:50 -0500
commit      f876bc2bf0e04b888c2748c0cabf8d11b31f41b7 (patch)
tree        8dcd8a817d203442fdcbc332d7fed0d5ab8f8701 /src/mem
parent      9a6e896d3bc904745f090aad1a6d40f04f5ac2ef (diff)
download    gem5-f876bc2bf0e04b888c2748c0cabf8d11b31f41b7.tar.xz
More fixes for functional accesses. It now causes the writeback memory leak to crash all configs; working on that now.

src/mem/cache/base_cache.cc:
    Keep a list of the responders so we can search them on functional accesses.
src/mem/cache/base_cache.hh:
    Properly put things on a list for responses so we can search the list.
    Also, be sure to check the outgoing ports' lists on a functional access
    (factor some common code out there).
src/mem/cache/cache_impl.hh:
    Properly return when the first read hits on a functional access.
    Make sure to check the other port's list of packets before forwarding it out.

--HG--
extra : convert_revision : 1d21cb55ff29c15716617efc48441329707c088a
Diffstat (limited to 'src/mem')
-rw-r--r--   src/mem/cache/base_cache.cc |  87
-rw-r--r--   src/mem/cache/base_cache.hh | 121
-rw-r--r--   src/mem/cache/cache_impl.hh |  21
3 files changed, 187 insertions(+), 42 deletions(-)
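The commit message describes the core change: a functional (debug) access arriving at a cache port must first be checked against the packets that port still has queued (the existing drain list plus the new time-ordered transmit list of pending responses), and is forwarded on only if those packets do not satisfy it. The following is a minimal, self-contained sketch of that idea; SimplePacket, overlaps(), fixProbe(), and recvFunctionalSketch() are invented stand-ins for illustration, while the real code below works on gem5 PacketPtrs via Packet::intersect() and fixPacket().

// Minimal sketch of "check the packets a port still has queued before
// forwarding a functional access".  All names here are invented for this
// illustration and are not gem5 classes.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <list>
#include <utility>
#include <vector>

using Tick = uint64_t;

struct SimplePacket {
    uint64_t addr;                 // start address of the access
    std::vector<uint8_t> data;     // payload (write) or fill buffer (read)
    bool isRead;
};

static bool overlaps(const SimplePacket &a, const SimplePacket &b)
{
    return a.addr < b.addr + b.data.size() &&
           b.addr < a.addr + a.data.size();
}

// Copy bytes from a queued write into an overlapping functional read.
// Returns true ("not done") while the probe still needs data, false once
// the queued write covers it completely -- mirroring the bool that
// fixPacket() returns in this patch.
static bool fixProbe(SimplePacket &probe, const SimplePacket &queued)
{
    uint64_t lo = std::max(probe.addr, queued.addr);
    uint64_t hi = std::min(probe.addr + probe.data.size(),
                           queued.addr + queued.data.size());
    for (uint64_t a = lo; a < hi; ++a)
        probe.data[a - probe.addr] = queued.data[a - queued.addr];
    bool covered = queued.addr <= probe.addr &&
                   queued.addr + queued.data.size() >=
                       probe.addr + probe.data.size();
    return !covered;
}

// The pattern the patch adds to CachePort: search the drain list and the new
// time-ordered transmit list, and only forward the functional access if it
// was not satisfied locally.
static void recvFunctionalSketch(SimplePacket &probe,
                                 std::list<SimplePacket> &drainList,
                                 std::list<std::pair<Tick, SimplePacket> > &transmitList)
{
    bool notDone = true;
    for (auto i = drainList.begin(); i != drainList.end() && notDone; ++i)
        if (probe.isRead && !i->isRead && overlaps(probe, *i))
            notDone = fixProbe(probe, *i);
    for (auto j = transmitList.begin(); j != transmitList.end() && notDone; ++j)
        if (probe.isRead && !j->second.isRead && overlaps(probe, j->second))
            notDone = fixProbe(probe, j->second);
    if (notDone)
        std::cout << "forwarding functional access downstream\n";
    else
        std::cout << "satisfied from packets queued on this port\n";
}

int main()
{
    std::list<SimplePacket> drainList;                        // packets waiting to drain
    std::list<std::pair<Tick, SimplePacket> > transmitList;   // (ready tick, queued response)
    transmitList.push_back({100, SimplePacket{0x1000, {1, 2, 3, 4}, false}});

    SimplePacket probe{0x1000, std::vector<uint8_t>(4, 0), true};
    recvFunctionalSketch(probe, drainList, transmitList);     // satisfied from the queue
    return 0;
}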
diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index c26d7782b..489a24d4c 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -102,21 +102,51 @@ BaseCache::CachePort::recvAtomic(PacketPtr pkt)
return cache->doAtomicAccess(pkt, isCpuSide);
}
-void
-BaseCache::CachePort::recvFunctional(PacketPtr pkt)
+bool
+BaseCache::CachePort::checkFunctional(PacketPtr pkt)
{
//Check storage here first
list<PacketPtr>::iterator i = drainList.begin();
- list<PacketPtr>::iterator end = drainList.end();
- for (; i != end; ++i) {
+ list<PacketPtr>::iterator iend = drainList.end();
+ bool notDone = true;
+ while (i != iend && notDone) {
PacketPtr target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
- fixPacket(pkt, target);
+ notDone = fixPacket(pkt, target);
}
+ i++;
+ }
+ //Also check responses queued on the transmit list, not yet ready to drain
+ std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
+ std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();
+
+ while (j != jend && notDone) {
+ PacketPtr target = j->second;
+ // If the target contains data, and it overlaps the
+ // probed request, need to update data
+ if (target->intersect(pkt))
+ notDone = fixPacket(pkt, target);
+ j++;
}
- cache->doFunctionalAccess(pkt, isCpuSide);
+ return notDone;
+}
+
+void
+BaseCache::CachePort::recvFunctional(PacketPtr pkt)
+{
+ bool notDone = checkFunctional(pkt);
+ if (notDone)
+ cache->doFunctionalAccess(pkt, isCpuSide);
+}
+
+void
+BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
+{
+ bool notDone = checkFunctional(pkt);
+ if (notDone)
+ sendFunctional(pkt);
}
void
@@ -135,7 +165,7 @@ BaseCache::CachePort::recvRetry()
isCpuSide && cache->doSlaveRequest()) {
DPRINTF(CachePort, "%s has more responses/requests\n", name());
- BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+ BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
reqCpu->schedule(curTick + 1);
}
waitingOnRetry = false;
@@ -176,7 +206,7 @@ BaseCache::CachePort::recvRetry()
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
- BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+ BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
reqCpu->schedule(curTick + 1);
}
}
@@ -194,7 +224,7 @@ BaseCache::CachePort::recvRetry()
{
DPRINTF(CachePort, "%s has more requests\n", name());
//Still more to issue, rerequest in 1 cycle
- BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+ BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
reqCpu->schedule(curTick + 1);
}
}
@@ -226,23 +256,19 @@ BaseCache::CachePort::clearBlocked()
}
}
-BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
- : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
+BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse)
+ : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort),
+ newResponse(_newResponse)
{
- this->setFlags(AutoDelete);
+ if (!newResponse)
+ this->setFlags(AutoDelete);
pkt = NULL;
}
-BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, PacketPtr _pkt)
- : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
-{
- this->setFlags(AutoDelete);
-}
-
void
BaseCache::CacheEvent::process()
{
- if (!pkt)
+ if (!newResponse)
{
if (cachePort->waitingOnRetry) return;
//We have some responses to drain first
@@ -322,8 +348,16 @@ BaseCache::CacheEvent::process()
}
return;
}
- //Response
- //Know the packet to send
+ //Else it's a response; we know the packet to send
+ assert(cachePort->transmitList.size());
+ assert(cachePort->transmitList.front().first <= curTick);
+ pkt = cachePort->transmitList.front().second;
+ cachePort->transmitList.pop_front();
+ if (!cachePort->transmitList.empty()) {
+ Tick time = cachePort->transmitList.front().first;
+ schedule(time <= curTick ? curTick+1 : time);
+ }
+
if (pkt->flags & NACKED_LINE)
pkt->result = Packet::Nacked;
else
@@ -343,7 +377,7 @@ BaseCache::CacheEvent::process()
}
// Check if we're done draining once this list is empty
- if (cachePort->drainList.empty())
+ if (cachePort->drainList.empty() && cachePort->transmitList.empty())
cachePort->cache->checkDrain();
}
@@ -358,8 +392,10 @@ BaseCache::getPort(const std::string &if_name, int idx)
{
if (if_name == "")
{
- if(cpuSidePort == NULL)
+ if(cpuSidePort == NULL) {
cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+ sendEvent = new CacheEvent(cpuSidePort, true);
+ }
return cpuSidePort;
}
else if (if_name == "functional")
@@ -368,8 +404,10 @@ BaseCache::getPort(const std::string &if_name, int idx)
}
else if (if_name == "cpu_side")
{
- if(cpuSidePort == NULL)
+ if(cpuSidePort == NULL) {
cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+ sendEvent = new CacheEvent(cpuSidePort, true);
+ }
return cpuSidePort;
}
else if (if_name == "mem_side")
@@ -377,6 +415,7 @@ BaseCache::getPort(const std::string &if_name, int idx)
if (memSidePort != NULL)
panic("Already have a mem side for this cache\n");
memSidePort = new CachePort(name() + "-mem_side_port", this, false);
+ memSendEvent = new CacheEvent(memSidePort, true);
return memSidePort;
}
else panic("Port name %s unrecognized\n", if_name);
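The response branch added to CacheEvent::process() in base_cache.cc above drains the port's transmit list one entry per event: it pops the front entry, which must already be due, and if entries remain it reschedules itself for the next entry's tick (or curTick + 1 when that tick has already passed), so at most one send event is ever in flight per port. Below is a hedged sketch of that self-rescheduling pattern; EventQueue, Port, and processSend() are invented stand-ins, not gem5's Event and mainEventQueue.

// Sketch of "send the due entry, then re-arm for the next one".
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>
#include <map>
#include <string>
#include <utility>

using Tick = uint64_t;

struct EventQueue {
    Tick curTick = 0;
    std::multimap<Tick, std::function<void()> > events;

    void schedule(Tick when, std::function<void()> fn) {
        events.insert({when, std::move(fn)});
    }
    void run() {
        while (!events.empty()) {
            auto it = events.begin();
            curTick = it->first;
            auto fn = it->second;
            events.erase(it);
            fn();
        }
    }
};

struct Port {
    EventQueue &eq;
    std::list<std::pair<Tick, std::string> > transmitList;  // (ready tick, response)

    // Mirrors the response branch of CacheEvent::process(): send the entry
    // that is due now, then reschedule for the next one if any remain.
    void processSend() {
        Tick now = eq.curTick;
        // The front entry must be due; otherwise the event fired too early.
        if (transmitList.empty() || transmitList.front().first > now)
            return;
        std::string pkt = transmitList.front().second;
        transmitList.pop_front();
        if (!transmitList.empty()) {
            Tick next = transmitList.front().first;
            eq.schedule(next <= now ? now + 1 : next, [this] { processSend(); });
        }
        std::cout << "tick " << now << ": sending " << pkt << "\n";
    }
};

int main()
{
    EventQueue eq;
    Port port{eq, {{10, "resp A"}, {10, "resp B"}, {25, "resp C"}}};
    eq.schedule(10, [&port] { port.processSend(); });  // first response is due at 10
    eq.run();   // A at 10, B at 11 (tick 10 already passed), C at 25
    return 0;
}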
diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
index ea7544fbb..9a79bd36c 100644
--- a/src/mem/cache/base_cache.hh
+++ b/src/mem/cache/base_cache.hh
@@ -105,7 +105,11 @@ class BaseCache : public MemObject
void clearBlocked();
- bool canDrain() { return drainList.empty(); }
+ bool checkFunctional(PacketPtr pkt);
+
+ void checkAndSendFunctional(PacketPtr pkt);
+
+ bool canDrain() { return drainList.empty() && transmitList.empty(); }
bool blocked;
@@ -117,15 +121,16 @@ class BaseCache : public MemObject
std::list<PacketPtr> drainList;
+ std::list<std::pair<Tick,PacketPtr> > transmitList;
};
struct CacheEvent : public Event
{
CachePort *cachePort;
PacketPtr pkt;
+ bool newResponse;
- CacheEvent(CachePort *_cachePort);
- CacheEvent(CachePort *_cachePort, PacketPtr _pkt);
+ CacheEvent(CachePort *_cachePort, bool response);
void process();
const char *description();
};
@@ -133,6 +138,9 @@ class BaseCache : public MemObject
public: //Made public so coherence can get at it.
CachePort *cpuSidePort;
+ CacheEvent *sendEvent;
+ CacheEvent *memSendEvent;
+
protected:
CachePort *memSidePort;
@@ -353,6 +361,12 @@ class BaseCache : public MemObject
snoopRangesSent = false;
}
+ ~BaseCache()
+ {
+ delete sendEvent;
+ delete memSendEvent;
+ }
+
virtual void init();
/**
@@ -467,7 +481,8 @@ class BaseCache : public MemObject
{
if (!doMasterRequest() && !memSidePort->waitingOnRetry)
{
- BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
+ BaseCache::CacheEvent * reqCpu =
+ new BaseCache::CacheEvent(memSidePort, false);
reqCpu->schedule(time);
}
uint8_t flag = 1<<cause;
@@ -503,7 +518,8 @@ class BaseCache : public MemObject
{
if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
{
- BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(cpuSidePort);
+ BaseCache::CacheEvent * reqCpu =
+ new BaseCache::CacheEvent(cpuSidePort, false);
reqCpu->schedule(time);
}
uint8_t flag = 1<<cause;
@@ -528,9 +544,38 @@ class BaseCache : public MemObject
*/
void respond(PacketPtr pkt, Tick time)
{
+ assert(time >= curTick);
if (pkt->needsResponse()) {
- CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
+/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
reqCpu->schedule(time);
+*/
+ if (cpuSidePort->transmitList.empty()) {
+ assert(!sendEvent->scheduled());
+ sendEvent->schedule(time);
+ cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+ (time,pkt));
+ return;
+ }
+
+ // something is on the list and this belongs at the end
+ if (time >= cpuSidePort->transmitList.back().first) {
+ cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+ (time,pkt));
+ return;
+ }
+ // Something is on the list and this belongs somewhere else
+ std::list<std::pair<Tick,PacketPtr> >::iterator i =
+ cpuSidePort->transmitList.begin();
+ std::list<std::pair<Tick,PacketPtr> >::iterator end =
+ cpuSidePort->transmitList.end();
+ bool done = false;
+
+ while (i != end && !done) {
+ if (time < i->first) {
+ cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
+ (time,pkt));
+ done = true;
+ }
+ i++;
+ }
}
else {
if (pkt->cmd != Packet::UpgradeReq)
@@ -548,12 +593,42 @@ class BaseCache : public MemObject
*/
void respondToMiss(PacketPtr pkt, Tick time)
{
+ assert(time >= curTick);
if (!pkt->req->isUncacheable()) {
- missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
+ missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
+ time - pkt->time;
}
if (pkt->needsResponse()) {
- CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
+/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
reqCpu->schedule(time);
+*/
+ if (cpuSidePort->transmitList.empty()) {
+ assert(!sendEvent->scheduled());
+ sendEvent->schedule(time);
+ cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+ (time,pkt));
+ return;
+ }
+
+ // something is on the list and this belongs at the end
+ if (time >= cpuSidePort->transmitList.back().first) {
+ cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+ (time,pkt));
+ return;
+ }
+ // Something is on the list and this belongs somewhere else
+ std::list<std::pair<Tick,PacketPtr> >::iterator i =
+ cpuSidePort->transmitList.begin();
+ std::list<std::pair<Tick,PacketPtr> >::iterator end =
+ cpuSidePort->transmitList.end();
+ bool done = false;
+
+ while (i != end && !done) {
+ if (time < i->first) {
+ cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
+ (time,pkt));
+ done = true;
+ }
+ i++;
+ }
}
else {
if (pkt->cmd != Packet::UpgradeReq)
@@ -570,9 +645,37 @@ class BaseCache : public MemObject
*/
void respondToSnoop(PacketPtr pkt, Tick time)
{
+ assert(time >= curTick);
assert (pkt->needsResponse());
- CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
+/* CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
reqMem->schedule(time);
+*/
+ if (memSidePort->transmitList.empty()) {
+ assert(!memSendEvent->scheduled());
+ memSendEvent->schedule(time);
+ memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+ (time,pkt));
+ return;
+ }
+
+ // something is on the list and this belongs at the end
+ if (time >= memSidePort->transmitList.back().first) {
+ memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
+ (time,pkt));
+ return;
+ }
+ // Something is on the list and this belongs somewhere else
+ std::list<std::pair<Tick,PacketPtr> >::iterator i =
+ memSidePort->transmitList.begin();
+ std::list<std::pair<Tick,PacketPtr> >::iterator end =
+ memSidePort->transmitList.end();
+ bool done = false;
+
+ while (i != end && !done) {
+ if (time < i->first) {
+ memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
+ done = true;
+ }
+ i++;
+ }
}
/**
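respond(), respondToMiss(), and respondToSnoop() above all queue the response on the port's transmit list in tick order: the send event is scheduled only when the list was empty, the entry is appended when its time is not earlier than the last queued tick, and otherwise it is inserted in front of the first entry with a later tick, exactly once. Here is a sketch of that insertion policy factored into a single helper; TransmitList, insertResponse(), and the scheduleSend callback are invented names, not gem5 identifiers.

// Tick-ordered insertion into a transmit list of pending responses.
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>
#include <string>
#include <utility>

using Tick = uint64_t;
using PacketName = std::string;                       // stand-in for PacketPtr
using TransmitList = std::list<std::pair<Tick, PacketName> >;

// Queue a response so the list stays sorted by the tick it becomes ready.
// scheduleSend is called only when the list was empty, because a send event
// is already pending whenever the list is non-empty.
void insertResponse(TransmitList &list, Tick time, PacketName pkt,
                    const std::function<void(Tick)> &scheduleSend)
{
    if (list.empty()) {
        scheduleSend(time);                           // nothing pending yet
        list.emplace_back(time, std::move(pkt));
        return;
    }
    if (time >= list.back().first) {                  // belongs at the end
        list.emplace_back(time, std::move(pkt));
        return;
    }
    for (auto i = list.begin(); i != list.end(); ++i) {
        if (time < i->first) {                        // first later entry
            list.emplace(i, time, std::move(pkt));
            return;                                   // insert exactly once
        }
    }
}

int main()
{
    TransmitList tl;
    auto scheduleSend = [](Tick t) { std::cout << "schedule send event at " << t << "\n"; };

    insertResponse(tl, 30, "resp A", scheduleSend);   // empty -> schedules the event
    insertResponse(tl, 50, "resp B", scheduleSend);   // appended at the end
    insertResponse(tl, 40, "resp C", scheduleSend);   // inserted between A and B

    for (const auto &e : tl)
        std::cout << e.first << ": " << e.second << "\n";   // 30 A, 40 C, 50 B
    return 0;
}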
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 9bb72e85c..176d9159a 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -536,7 +536,7 @@ Cache<TagStore,Buffering,Coherence>::probe(PacketPtr &pkt, bool update,
if (!update && (pkt->isWrite() || (otherSidePort == cpuSidePort))) {
// Still need to change data in all locations.
- otherSidePort->sendFunctional(pkt);
+ otherSidePort->checkAndSendFunctional(pkt);
if (pkt->isRead() && pkt->result == Packet::Success)
return 0;
}
@@ -560,30 +560,33 @@ Cache<TagStore,Buffering,Coherence>::probe(PacketPtr &pkt, bool update,
missQueue->findWrites(blk_addr, writes);
if (!update) {
+ bool notDone = !(pkt->flags & SATISFIED); //Already done if it hit a block in the cache
// Check for data in MSHR and writebuffer.
if (mshr) {
MSHR::TargetList *targets = mshr->getTargetList();
MSHR::TargetList::iterator i = targets->begin();
MSHR::TargetList::iterator end = targets->end();
- for (; i != end; ++i) {
+ for (; i != end && notDone; ++i) {
PacketPtr target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
- fixPacket(pkt, target);
+ DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a MSHR\n",
+ pkt->cmdString(), blk_addr);
+ notDone = fixPacket(pkt, target);
}
}
}
- for (int i = 0; i < writes.size(); ++i) {
+ for (int i = 0; i < writes.size() && notDone; ++i) {
PacketPtr write = writes[i]->pkt;
if (write->intersect(pkt)) {
- fixPacket(pkt, write);
+ DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a writeback\n",
+ pkt->cmdString(), blk_addr);
+ notDone = fixPacket(pkt, write);
}
}
- if (pkt->isRead()
- && pkt->result != Packet::Success
- && otherSidePort == memSidePort) {
- otherSidePort->sendFunctional(pkt);
+ if (notDone && otherSidePort == memSidePort) {
+ otherSidePort->checkAndSendFunctional(pkt);
assert(pkt->result == Packet::Success);
}
return 0;
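After these changes the functional path of probe() stops searching as soon as the access is satisfied: first the block hit recorded by the SATISFIED flag, then overlapping MSHR targets, then the write buffer, and only if the probe is still incomplete is it sent out the memory-side port, whose checkAndSendFunctional() repeats the same queue checks there. The sketch below only illustrates that ordering; ProbeSources, probeFunctional(), and the callable members are invented for illustration and are not part of the cache interface.

// Search order of a functional probe: cache block, MSHR targets,
// write buffer, then (only if still unsatisfied) the memory side.
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

struct ProbeSources {
    bool hitInCache;                                   // SATISFIED set by the tag lookup
    std::vector<std::function<bool()> > mshrTargets;   // each returns "still not done"
    std::vector<std::function<bool()> > writeBuffer;   // ditto
    std::function<void()> forwardToMemSide;            // checkAndSendFunctional() stand-in
};

// Returns true if the probe had to be forwarded below this cache.
bool probeFunctional(const ProbeSources &src)
{
    bool notDone = !src.hitInCache;                    // a block hit already satisfied it
    for (std::size_t i = 0; i < src.mshrTargets.size() && notDone; ++i)
        notDone = src.mshrTargets[i]();                // overlapping MSHR target data
    for (std::size_t i = 0; i < src.writeBuffer.size() && notDone; ++i)
        notDone = src.writeBuffer[i]();                // overlapping queued writeback data
    if (notDone)
        src.forwardToMemSide();                        // last resort: ask the next level
    return notDone;
}

int main()
{
    ProbeSources src{
        /*hitInCache=*/false,
        { [] { std::cout << "checked an MSHR target\n"; return false; } },  // satisfies it
        { [] { std::cout << "checked a buffered write\n"; return true; } },
        [] { std::cout << "forwarded out the mem-side port\n"; }
    };
    bool forwarded = probeFunctional(src);
    std::cout << (forwarded ? "forwarded" : "satisfied locally") << "\n";
    return 0;
}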