author     Ron Dreslinski <rdreslin@umich.edu>   2006-10-05 21:10:03 -0400
committer  Ron Dreslinski <rdreslin@umich.edu>   2006-10-05 21:10:03 -0400
commit     45f881a4ced25105267799432c0f526400f0ba9e (patch)
tree       3d791c2408be2f1b9c1d349a5dc06ffdb4850b4a /src
parent     868d112578467273a50de3bf926bf0d280eebcd3 (diff)
download   gem5-45f881a4ced25105267799432c0f526400f0ba9e.tar.xz
First pass at snooping stuff that compiles and doesn't break.
Still need:
-Handle NACKs on the receive side
-Distinguish top level caches
-Handle responses from caches failing the fast path
-Handle BusError and propagate it
-Fix the invalidate packet associated with snooping in the cache
src/mem/bus.cc:
Make sure to snoop on functional accesses
src/mem/cache/base_cache.cc:
Wait to turn a request into a response until it is ready to be issued
src/mem/cache/base_cache.hh:
Support range changes for snoops (see the sketch at the end of this page)
Set up snoop responses for cache->cache transfers (see the sketch after the diffstat)
src/mem/cache/cache_impl.hh:
Only access the cache if it wasn't satisfied by a cache->cache transfer
Handle snoop phases (detect the block, then snoop; see the sketch after the commit message)
Fix functional access to work properly (still need to fix snoop path for functional accesses)
--HG--
extra : convert_revision : 4c25f11d7a996c1f56f4f7b55dde87a344e5fdf8
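
The "Handle snoop phases" change in cache_impl.hh is the core protocol tweak: a snoop now reaches the cache twice, once so the cache can detect whether it holds the block (or has a matching MSHR), and a second time, only if the bus commits the snoop, to actually act on it. The standalone C++ sketch below is an editorial illustration, not gem5 code: ToyCache and the cut-down Packet are invented for the example, while snoopPhase2, SNOOP_COMMIT and SATISFIED are names taken from the patch.

    // Standalone sketch of the two-phase snoop handling (not gem5 code).
    #include <cstdint>
    #include <iostream>
    #include <unordered_set>

    enum PacketFlags : uint32_t { SNOOP_COMMIT = 1u << 0, SATISFIED = 1u << 1 };

    struct Packet {               // drastically simplified stand-in
        uint64_t addr = 0;
        uint32_t flags = 0;
    };

    class ToyCache {
        std::unordered_set<uint64_t> blocks;  // block addresses currently held
        bool snoopPhase2 = false;             // mirrors the new BaseCache flag
      public:
        void fill(uint64_t blkAddr) { blocks.insert(blkAddr); }

        // Called once per snoop phase, like the else branch of doTimingAccess().
        void recvSnoop(Packet &pkt) {
            if (!snoopPhase2) {
                // Phase 1: only detect; the real patch uses this pass to find
                // the block (or a matching MSHR) before the bus commits.
                snoopPhase2 = true;
                return;
            }
            // Phase 2: act on the snoop only if the bus committed it.
            if (pkt.flags & SNOOP_COMMIT) {
                if (blocks.erase(pkt.addr))   // invalidate on a hit
                    pkt.flags |= SATISFIED;
            }
            snoopPhase2 = false;
        }
    };

    int main() {
        ToyCache cache;
        cache.fill(0x1000);
        Packet pkt;
        pkt.addr = 0x1000;
        pkt.flags = SNOOP_COMMIT;
        cache.recvSnoop(pkt);   // phase 1: detect
        cache.recvSnoop(pkt);   // phase 2: snoop and invalidate
        std::cout << "satisfied: " << ((pkt.flags & SATISFIED) != 0) << "\n";
        return 0;
    }

Note that the committed hunk tests pkt->flags && SNOOP_COMMIT; assuming SNOOP_COMMIT is a nonzero flag bit, that reads like a typo for a bitwise &, which is what the sketch uses.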
Diffstat (limited to 'src')
-rw-r--r--  src/mem/bus.cc              |  1
-rw-r--r--  src/mem/cache/base_cache.cc |  4
-rw-r--r--  src/mem/cache/base_cache.hh | 42
-rw-r--r--  src/mem/cache/cache_impl.hh | 34
4 files changed, 61 insertions, 20 deletions
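
The "Set up snoop responses for cache->cache transfers" change gives respondToSnoop() a time argument and, instead of asserting, schedules a CacheEvent on the memory-side port. The call sites in the snoop() hunks pass curTick + hitLatency, so the response goes out one hit latency after the snoop hit. The standalone sketch below illustrates the same defer-by-hit-latency pattern; EventQueue, Event and the lambda are invented stand-ins for gem5's CacheEvent and sendTiming, not its real API.

    // Standalone sketch of respondToSnoop(pkt, time) deferring a
    // cache-to-cache response by the hit latency (not gem5's event/port API).
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <queue>
    #include <utility>
    #include <vector>

    using Tick = uint64_t;

    struct Event {
        Tick when;
        std::function<void()> action;
    };

    struct EarliestFirst {
        // Makes the priority_queue pop the earliest event first.
        bool operator()(const Event &a, const Event &b) const { return a.when > b.when; }
    };

    class EventQueue {
        std::priority_queue<Event, std::vector<Event>, EarliestFirst> q;
      public:
        Tick curTick = 0;
        void schedule(Tick when, std::function<void()> a) { q.push({when, std::move(a)}); }
        void run() {
            while (!q.empty()) {
                Event e = q.top();
                q.pop();
                curTick = e.when;
                e.action();
            }
        }
    };

    int main() {
        EventQueue eq;
        const Tick hitLatency = 3;

        // Analogue of the new respondToSnoop(pkt, curTick + hitLatency):
        // queue the packet to go out on the memory-side port later.
        auto respondToSnoop = [&](uint64_t blkAddr) {
            eq.schedule(eq.curTick + hitLatency, [&eq, blkAddr] {
                std::cout << "tick " << eq.curTick << ": supplying block 0x"
                          << std::hex << blkAddr << std::dec
                          << " cache->cache on the memory-side port\n";
            });
        };

        respondToSnoop(0x1000);  // snoop hit at tick 0, response sent at tick 3
        eq.run();
        return 0;
    }

The point of the extra Tick argument is that the caller now decides when the response is sent (curTick + hitLatency at the call sites in the diff) rather than respondToSnoop() hard-coding it.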
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index cf9e54e62..3c5283a77 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -252,6 +252,7 @@ Bus::recvFunctional(Packet *pkt)
     DPRINTF(Bus, "recvFunctional: packet src %d dest %d addr 0x%x cmd %s\n",
             pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
     assert(pkt->getDest() == Packet::Broadcast);
+    atomicSnoop(pkt);
     findPort(pkt->getAddr(), pkt->getSrc())->sendFunctional(pkt);
 }

diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index a172847df..e6138e320 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -199,7 +199,9 @@ BaseCache::CacheEvent::process()
         return;
     }
     //Response
-    //Know the packet to send, no need to mark in service (must succed)
+    //Know the packet to send
+    pkt->result = Packet::Success;
+    pkt->makeTimingResponse();
     assert(cachePort->sendTiming(pkt));
 }

diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
index 069dbab58..49999dcb4 100644
--- a/src/mem/cache/base_cache.hh
+++ b/src/mem/cache/base_cache.hh
@@ -127,6 +127,8 @@ class BaseCache : public MemObject
     CachePort *cpuSidePort;
     CachePort *memSidePort;

+    bool snoopRangesSent;
+
   public:
     virtual Port *getPort(const std::string &if_name, int idx = -1);

@@ -149,17 +151,22 @@ class BaseCache : public MemObject

    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
-        if (status == Port::RangeChange)
-        {
-            if (!isCpuSide)
-            {
+        if (status == Port::RangeChange){
+            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
+                if (topLevelCache && !snoopRangesSent) {
+                    snoopRangesSent = true;
+                    memSidePort->sendStatusChange(Port::RangeChange);
+                }
            }
-            else
-            {
+            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
+        else if (status == Port::SnoopSquash) {
+            assert(snoopPhase2);
+            snoopPhase2 = false;
+        }
    }

    virtual Packet *getPacket()
@@ -205,6 +212,10 @@ class BaseCache : public MemObject
    /** True if this cache is connected to the CPU. */
    bool topLevelCache;

+
+    /** True if we are now in phase 2 of the snoop process. */
+    bool snoopPhase2;
+
    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

@@ -332,6 +343,7 @@ class BaseCache : public MemObject
        //Start ports at null if more than one is created we should panic
        cpuSidePort = NULL;
        memSidePort = NULL;
+        snoopRangesSent = false;
    }

    virtual void init();
@@ -519,8 +531,6 @@ class BaseCache : public MemObject
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][pkt->req->getThreadNum()] += time - pkt->time;
        }
-        pkt->makeTimingResponse();
-        pkt->result = Packet::Success;
        CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
        reqCpu->schedule(time);
    }
@@ -529,10 +539,12 @@ class BaseCache : public MemObject
     * Suppliess the data if cache to cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     */
-    void respondToSnoop(Packet *pkt)
+    void respondToSnoop(Packet *pkt, Tick time)
    {
-        assert("Implement\n" && 0);
+//        assert("Implement\n" && 0);
//     mi->respond(pkt,curTick + hitLatency);
+        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
+        reqMem->schedule(time);
    }

    /**
@@ -551,6 +563,16 @@ class BaseCache : public MemObject
        else
        {
            //This is where snoops get updated
+            AddrRangeList dummy;
+            if (!topLevelCache)
+            {
+                cpuSidePort->getPeerAddressRanges(dummy, snoop);
+            }
+            else
+            {
+                snoop.push_back(RangeSize(0,-1));
+            }
+            return;
        }
    }

diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 11cd84e88..bea495f9f 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -63,14 +63,26 @@ doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            pkt->req->setScResult(1);
        }
-        access(pkt);
+        if (!(pkt->flags & SATISFIED)) {
+            access(pkt);
+        }
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
-        else
-            snoop(pkt);
+        else {
+            //Check if we are in phase1
+            if (!snoopPhase2) {
+                snoopPhase2 = true;
+            }
+            else {
+                //Check if we should do the snoop
+                if (pkt->flags && SNOOP_COMMIT)
+                    snoop(pkt);
+                snoopPhase2 = false;
+            }
+        }
    }
    return true;
}
@@ -117,7 +129,7 @@ doFunctionalAccess(Packet *pkt, bool isCpuSide)
            assert("Can't handle LL/SC on functional path\n");
        }
-        probe(pkt, true);
+        probe(pkt, false);
        //TEMP ALWAYS SUCCESFUL FOR NOW
        pkt->result = Packet::Success;
    }

@@ -126,7 +138,7 @@ doFunctionalAccess(Packet *pkt, bool isCpuSide)
        if (pkt->isResponse())
            handleResponse(pkt);
        else
-            snoopProbe(pkt, true);
+            snoopProbe(pkt, false);
    }
}

@@ -372,7 +384,7 @@ template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
-
+    DPRINTF(Cache, "SNOOPING");
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
@@ -385,7 +397,10 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
            //If the outstanding request was an invalidate (upgrade,readex,..)
            //Then we need to ACK the request until we get the data
            //Also NACK if the outstanding request is not a cachefill (writeback)
+            pkt->flags |= SATISFIED;
            pkt->flags |= NACKED_LINE;
+            assert("Don't detect these on the other side yet\n");
+            respondToSnoop(pkt, curTick + hitLatency);
            return;
        }
        else {
@@ -398,6 +413,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
            //@todo Make it so that a read to a pending read can't be exclusive now.

            //Set the address so find match works
+            assert("Don't have invalidates yet\n");
            invalidatePkt->addrOverride(pkt->getAddr());

            //Append the invalidate on
@@ -433,7 +449,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
                assert(offset + pkt->getSize() <=blkSize);
                memcpy(pkt->getPtr<uint8_t>(), mshr->pkt->getPtr<uint8_t>() + offset,
                       pkt->getSize());
-                respondToSnoop(pkt);
+                respondToSnoop(pkt, curTick + hitLatency);
            }

            if (pkt->isInvalidate()) {
@@ -449,7 +465,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
    if (satisfy) {
        tags->handleSnoop(blk, new_state, pkt);
-        respondToSnoop(pkt);
+        respondToSnoop(pkt, curTick + hitLatency);
        return;
    }
    tags->handleSnoop(blk, new_state);
@@ -517,7 +533,7 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
    missQueue->findWrites(blk_addr, writes);

    if (!update) {
-        memSidePort->sendFunctional(pkt);
+        memSidePort->sendFunctional(pkt);
        // Check for data in MSHR and writebuffer.
        if (mshr) {
            warn("Found outstanding miss on an non-update probe");
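
Finally, the "Support range changes for snoops" note maps to the new snoop-range code at the end of the base_cache.hh hunk above: a top-level cache (one connected to the CPU) advertises the whole address space for snooping via RangeSize(0,-1), while a lower-level cache forwards whatever its CPU-side peer snoops on. The sketch below is an invented, self-contained rendering of that decision; ToyCache, AddrRange and getSnoopRanges are illustrative names, with only topLevelCache and the RangeSize(0,-1) idiom taken from the patch.

    // Standalone sketch of the snoop-range decision (not gem5's Port API).
    #include <cstdint>
    #include <iostream>
    #include <list>
    #include <utility>

    using AddrRange = std::pair<uint64_t, uint64_t>;  // inclusive [start, end]
    using AddrRangeList = std::list<AddrRange>;

    struct ToyCache {
        bool topLevelCache;
        AddrRangeList cpuSideSnoopRanges;  // what the CPU-side peer reports

        void getSnoopRanges(AddrRangeList &snoop) const {
            if (topLevelCache) {
                // Analogue of snoop.push_back(RangeSize(0,-1)): the cache
                // connected to the CPU advertises the whole address space.
                snoop.push_back({0, ~uint64_t(0)});
            } else {
                // Analogue of cpuSidePort->getPeerAddressRanges(dummy, snoop):
                // a lower cache forwards its CPU-side peer's snoop ranges.
                snoop.insert(snoop.end(), cpuSideSnoopRanges.begin(),
                             cpuSideSnoopRanges.end());
            }
        }
    };

    int main() {
        ToyCache l1{true, {}};
        AddrRangeList snoop;
        l1.getSnoopRanges(snoop);
        std::cout << "top-level cache snoops [0x0, 0x" << std::hex
                  << snoop.front().second << "]\n";
        return 0;
    }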