Diffstat (limited to 'src/mem/cache/cache_impl.hh')
-rw-r--r--  src/mem/cache/cache_impl.hh  75
1 files changed, 45 insertions, 30 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 46f4b0ebe..c3c1c0881 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -60,28 +60,20 @@ doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
if (isCpuSide)
{
- if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
+ if (pkt->isWrite() && (pkt->req->isLocked())) {
pkt->req->setScResult(1);
}
- if (!(pkt->flags & SATISFIED)) {
- access(pkt);
- }
+ access(pkt);
+
}
else
{
if (pkt->isResponse())
handleResponse(pkt);
else {
- //Check if we are in phase1
- if (!snoopPhase2) {
- snoopPhase2 = true;
- }
- else {
- //Check if we should do the snoop
- if (pkt->flags && SNOOP_COMMIT)
- snoop(pkt);
- snoopPhase2 = false;
- }
+ //Check if we should do the snoop
+ if (pkt->flags & SNOOP_COMMIT)
+ snoop(pkt);
}
}
return true;
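
This hunk (and the two below it for the atomic and functional paths) swaps the raw getFlags() & LOCKED test for a Request::isLocked() accessor, so the cache no longer depends on the flag encoding. A minimal sketch of such an accessor, assuming LOCKED stays a private flag bit inside Request (the real gem5 Request class carries far more state than shown):

    // Sketch only: the bit value and member names are assumptions, not the
    // actual Request definition.
    class Request
    {
      public:
        bool isLocked() const { return (flags & LOCKED) != 0; }
        void setScResult(uint64_t r) { scResult = r; }  // temporary LL/SC result set above

      private:
        static const uint32_t LOCKED = 0x100;  // assumed bit position
        uint32_t flags = 0;
        uint64_t scResult = 0;
    };
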
@@ -95,7 +87,7 @@ doAtomicAccess(Packet *pkt, bool isCpuSide)
if (isCpuSide)
{
//Temporary solution to LL/SC
- if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
+ if (pkt->isWrite() && (pkt->req->isLocked())) {
pkt->req->setScResult(1);
}
@@ -125,7 +117,7 @@ doFunctionalAccess(Packet *pkt, bool isCpuSide)
pkt->req->setThreadContext(0,0);
//Temporary solution to LL/SC
- if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
+ if (pkt->isWrite() && (pkt->req->isLocked())) {
assert("Can't handle LL/SC on functional path\n");
}
@@ -211,9 +203,8 @@ Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
pkt->getAddr() & (((ULL(1))<<48)-1),
pkt->getAddr() & ~((Addr)blkSize - 1));
- //@todo Should this return latency have the hit latency in it?
-// respond(pkt,curTick+lat);
pkt->flags |= SATISFIED;
+ //Invalidates/Upgrades need no response if they get the bus
// return MA_HIT; //@todo, return values
return true;
}
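
The new comment records why setting SATISFIED is enough here: an invalidate or upgrade that wins the bus needs no data response. A hedged sketch of the requester-side consequence, reusing names from the surrounding code (the exact call site is an assumption, not part of this change):

    // Illustrative only: how a satisfied invalidate/upgrade might be retired
    // once the bus grant succeeds, since no response will follow.
    if (success && pkt->isInvalidate() && (pkt->flags & SATISFIED)) {
        missQueue->markInService(pkt, mshr);
    }
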
@@ -243,9 +234,9 @@ Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
missQueue->doWriteback(writebacks.front());
writebacks.pop_front();
}
- DPRINTF(Cache, "%s %x %s blk_addr: %x pc %x\n", pkt->cmdString(),
+ DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
- pkt->getAddr() & ~((Addr)blkSize - 1), pkt->req->getPC());
+ pkt->getAddr() & ~((Addr)blkSize - 1));
if (blk) {
// Hit
hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
@@ -294,10 +285,10 @@ Cache<TagStore,Buffering,Coherence>::getPacket()
template<class TagStore, class Buffering, class Coherence>
void
-Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, bool success)
+Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
{
if (success) {
- missQueue->markInService(pkt);
+ missQueue->markInService(pkt, mshr);
//Temp Hack for UPGRADES
if (pkt->cmd == Packet::UpgradeReq) {
handleResponse(pkt);
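
sendResult() now carries the MSHR explicitly and hands it to markInService(). A rough sketch of a caller threading the MSHR through, assuming the sending port keeps track of the MSHR whose packet it is trying to send (the function and parameter names are illustrative, not the real CachePort code):

    // Illustrative caller of the new signature.
    template <class CacheType>
    void
    trySendMiss(CacheType *cache, CachePort *port, PacketPtr pkt, MSHR *mshr)
    {
        bool success = port->sendTiming(pkt);   // the bus may reject the packet
        // The MSHR is no longer recovered from the packet; pass it along so
        // markInService() updates (or retries) the right entry.
        cache->sendResult(pkt, mshr, success);
    }
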
@@ -313,6 +304,13 @@ Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
BlkType *blk = NULL;
if (pkt->senderState) {
+ if (pkt->result == Packet::Nacked) {
+ pkt->reinitFromRequest();
+ panic("Unimplemented NACK of packet\n");
+ }
+ if (pkt->result == Packet::BadAddress) {
+ //Make the response a Bad address and send it
+ }
// MemDebug::cacheResponse(pkt);
DPRINTF(Cache, "Handling reponse to %x, blk addr: %x\n",pkt->getAddr(),
pkt->getAddr() & (((ULL(1))<<48)-1));
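
Both new result checks are placeholders: the NACK branch still panics after re-initializing the packet, and the BadAddress branch is empty. Assuming a NACKed fill should simply be re-queued and retried, a finished NACK path might look roughly like this (restoreOrigCmd() is a hypothetical helper, not something this change provides):

    // Assumed completion of the NACK path; not part of this changeset.
    if (pkt->result == Packet::Nacked) {
        pkt->reinitFromRequest();        // rebuild cmd/addr/size from the original request
        missQueue->restoreOrigCmd(pkt);  // hypothetical: put the miss back in the queue
        return;                          // the retry, not this response, fills the block
    }
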
@@ -321,9 +319,11 @@ Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
blk = tags->findBlock(pkt);
CacheBlk::State old_state = (blk) ? blk->status : 0;
PacketList writebacks;
+ CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
+ DPRINTF(Cache, "Block for blk addr %x moving from state %i to %i\n",
+ pkt->getAddr() & (((ULL(1))<<48)-1), old_state, new_state);
blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
- coherence->getNewState(pkt,old_state),
- writebacks, pkt);
+ new_state, writebacks, pkt);
while (!writebacks.empty()) {
missQueue->doWriteback(writebacks.front());
writebacks.pop_front();
@@ -394,9 +394,9 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
//If the outstanding request was an invalidate (upgrade,readex,..)
//Then we need to ACK the request until we get the data
//Also NACK if the outstanding request is not a cachefill (writeback)
+ assert(!(pkt->flags & SATISFIED));
pkt->flags |= SATISFIED;
pkt->flags |= NACKED_LINE;
- assert("Don't detect these on the other side yet\n");
respondToSnoop(pkt, curTick + hitLatency);
return;
}
@@ -410,7 +410,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
//@todo Make it so that a read to a pending read can't be exclusive now.
//Set the address so find match works
- assert("Don't have invalidates yet\n");
+ panic("Don't have invalidates yet\n");
invalidatePkt->addrOverride(pkt->getAddr());
//Append the invalidate on
@@ -433,6 +433,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
if (pkt->isRead()) {
//Only Upgrades don't get here
//Supply the data
+ assert(!(pkt->flags & SATISFIED));
pkt->flags |= SATISFIED;
//If we are in an exclusive protocol, make it ask again
@@ -451,7 +452,7 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
if (pkt->isInvalidate()) {
//This must be an upgrade or other cache will take ownership
- missQueue->markInService(mshr->pkt);
+ missQueue->markInService(mshr->pkt, mshr);
}
return;
}
@@ -461,10 +462,16 @@ Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
CacheBlk::State new_state;
bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
if (satisfy) {
+ DPRINTF(Cache, "Cache snooped a %s request and now supplying data, "
+ "new state is %i\n",
+ pkt->cmdString(), new_state);
+
tags->handleSnoop(blk, new_state, pkt);
respondToSnoop(pkt, curTick + hitLatency);
return;
}
+ if (blk) DPRINTF(Cache, "Cache snooped a %s request, new state is %i\n",
+ pkt->cmdString(), new_state);
tags->handleSnoop(blk, new_state);
}
@@ -610,7 +617,7 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update, CachePort
// update the cache state and statistics
if (mshr || !writes.empty()){
// Can't handle it, return request unsatisfied.
- return 0;
+ panic("Atomic access ran into outstanding MSHR's or WB's!");
}
if (!pkt->req->isUncacheable()) {
// Fetch the cache block to fill
@@ -627,7 +634,9 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update, CachePort
lat = memSidePort->sendAtomic(busPkt);
//Be sure to flip the response to a request for coherence
- busPkt->makeAtomicResponse();
+ if (busPkt->needsResponse()) {
+ busPkt->makeAtomicResponse();
+ }
/* if (!(busPkt->flags & SATISFIED)) {
// blocked at a higher level, just return
@@ -662,7 +671,7 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update, CachePort
hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
} else if (pkt->isWrite()) {
// Still need to change data in all locations.
- return otherSidePort->sendAtomic(pkt);
+ otherSidePort->sendFunctional(pkt);
}
return curTick + lat;
}
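
Two related fixes land on the atomic probe path: the forwarded packet is only flipped into a response when it actually expects one, and write data is now pushed to other copies with sendFunctional(), so the probe's return value comes from the memory-side access rather than the other-side port. A condensed sketch of the resulting tail of probe(), with the intervening hit/miss handling elided and the surrounding names assumed from context:

    Tick lat = memSidePort->sendAtomic(busPkt);
    if (busPkt->needsResponse())
        busPkt->makeAtomicResponse();        // writebacks etc. have no response form
    // ... hit/miss accounting unchanged ...
    if (pkt->isWrite())
        otherSidePort->sendFunctional(pkt);  // patch other copies; adds no extra latency
    return curTick + lat;                    // latency comes from the fill, not the write
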
@@ -680,9 +689,15 @@ Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
CacheBlk::State new_state = 0;
bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
if (satisfy) {
+ DPRINTF(Cache, "Cache snooped a %s request and now supplying data, "
+ "new state is %i\n",
+ pkt->cmdString(), new_state);
+
tags->handleSnoop(blk, new_state, pkt);
return hitLatency;
}
+ if (blk) DPRINTF(Cache, "Cache snooped a %s request, new state is %i\n",
+ pkt->cmdString(), new_state);
tags->handleSnoop(blk, new_state);
return 0;
}