Diffstat (limited to 'src')
-rw-r--r--  src/cpu/memtest/memtest.hh                     |   4
-rw-r--r--  src/mem/bridge.cc                              |   6
-rw-r--r--  src/mem/bus.cc                                 |  43
-rw-r--r--  src/mem/cache/base_cache.cc                    | 313
-rw-r--r--  src/mem/cache/base_cache.hh                    | 323
-rw-r--r--  src/mem/cache/cache.cc                         |   6
-rw-r--r--  src/mem/cache/cache.hh                         |  58
-rw-r--r--  src/mem/cache/cache_builder.cc                 |  10
-rw-r--r--  src/mem/cache/cache_impl.hh                    | 319
-rw-r--r--  src/mem/cache/coherence/SConscript             |   1
-rw-r--r--  src/mem/cache/coherence/coherence_protocol.cc  |   3
-rw-r--r--  src/mem/cache/coherence/simple_coherence.hh    |   6
-rw-r--r--  src/mem/cache/coherence/uni_coherence.cc       | 135
-rw-r--r--  src/mem/cache/coherence/uni_coherence.hh       | 146
-rw-r--r--  src/mem/cache/miss/blocking_buffer.cc          |  14
-rw-r--r--  src/mem/cache/miss/miss_queue.cc               |  16
-rw-r--r--  src/mem/cache/miss/mshr_queue.cc               |   1
-rw-r--r--  src/mem/cache/prefetch/base_prefetcher.cc      |   8
-rw-r--r--  src/mem/packet.cc                              |  11
-rw-r--r--  src/mem/packet.hh                              |   6
20 files changed, 475 insertions(+), 954 deletions(-)
diff --git a/src/cpu/memtest/memtest.hh b/src/cpu/memtest/memtest.hh
index 123ee2a6c..a6b08d61c 100644
--- a/src/cpu/memtest/memtest.hh
+++ b/src/cpu/memtest/memtest.hh
@@ -85,13 +85,13 @@ class MemTest : public MemObject
TickEvent(MemTest *c)
: Event(&mainEventQueue, CPU_Tick_Pri), cpu(c) {}
void process() {cpu->tick();}
- virtual const char *description() { return "tick event"; }
+ virtual const char *description() { return "MemTest tick"; }
};
TickEvent tickEvent;
+
class CpuPort : public Port
{
-
MemTest *memtest;
public:
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 04b0308e1..eebf91a85 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -112,10 +112,6 @@ Bridge::BridgePort::reqQueueFull()
bool
Bridge::BridgePort::recvTiming(PacketPtr pkt)
{
- if (!(pkt->flags & SNOOP_COMMIT))
- return true;
-
-
DPRINTF(BusBridge, "recvTiming: src %d dest %d addr 0x%x\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr());
@@ -253,8 +249,6 @@ Bridge::BridgePort::trySend()
PacketPtr pkt = buf->pkt;
- pkt->flags &= ~SNOOP_COMMIT; //CLear it if it was set
-
// Ugly! @todo When multilevel coherence works this will be removed
if (pkt->cmd == MemCmd::WriteInvalidateReq && fixPartialWrite &&
pkt->result != Packet::Nacked) {
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index 1f96115b8..ec33bd4c5 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -182,8 +182,10 @@ Bus::recvTiming(PacketPtr pkt)
// If the bus is busy, or other devices are in line ahead of the current
// one, put this device on the retry list.
- if (tickNextIdle > curTick ||
- (retryList.size() && (!inRetry || pktPort != retryList.front()))) {
+    if (!(pkt->flags & EXPRESS_SNOOP) &&
+        (tickNextIdle > curTick ||
+         (retryList.size() && (!inRetry || pktPort != retryList.front()))))
+    {
addToRetryList(pktPort);
DPRINTF(Bus, "recvTiming: Bus is busy, returning false\n");
return false;
@@ -195,31 +197,18 @@ Bus::recvTiming(PacketPtr pkt)
// access has been handled twice.
if (dest == Packet::Broadcast) {
port = findPort(pkt->getAddr(), pkt->getSrc());
- pkt->flags &= ~SNOOP_COMMIT;
- if (timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()])) {
- bool success;
-
- pkt->flags |= SNOOP_COMMIT;
- success = timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
- assert(success);
-
- if (pkt->flags & SATISFIED) {
- //Cache-Cache transfer occuring
- if (inRetry) {
- retryList.front()->onRetryList(false);
- retryList.pop_front();
- inRetry = false;
- }
- occupyBus(pkt);
- DPRINTF(Bus, "recvTiming: Packet sucessfully sent\n");
- return true;
+ timingSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
+
+ if (pkt->flags & SATISFIED) {
+            //Cache-Cache transfer occurring
+ if (inRetry) {
+ retryList.front()->onRetryList(false);
+ retryList.pop_front();
+ inRetry = false;
}
- } else {
- //Snoop didn't succeed
- DPRINTF(Bus, "Adding1 a retry to RETRY list %d\n",
- pktPort->getId());
- addToRetryList(pktPort);
- return false;
+ occupyBus(pkt);
+        DPRINTF(Bus, "recvTiming: Packet successfully sent\n");
+ return true;
}
} else {
assert(dest >= 0 && dest < maxId);
@@ -400,7 +389,6 @@ Bus::recvAtomic(PacketPtr pkt)
DPRINTF(Bus, "recvAtomic: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
assert(pkt->getDest() == Packet::Broadcast);
- pkt->flags |= SNOOP_COMMIT;
// Assume one bus cycle in order to get through. This may have
// some clock skew issues yet again...
@@ -425,7 +413,6 @@ Bus::recvFunctional(PacketPtr pkt)
DPRINTF(Bus, "recvFunctional: packet src %d dest %d addr 0x%x cmd %s\n",
pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString());
assert(pkt->getDest() == Packet::Broadcast);
- pkt->flags |= SNOOP_COMMIT;
Port* port = findPort(pkt->getAddr(), pkt->getSrc());
functionalSnoop(pkt, port ? port : interfaces[pkt->getSrc()]);
diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index 8aac02460..a47c19e60 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -40,28 +40,29 @@
using namespace std;
-BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
- bool _isCpuSide)
- : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
+BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache)
+ : Port(_name, _cache), cache(_cache), otherPort(NULL)
{
blocked = false;
waitingOnRetry = false;
- //Start ports at null if more than one is created we should panic
- //cpuSidePort = NULL;
- //memSidePort = NULL;
}
-void
-BaseCache::CachePort::recvStatusChange(Port::Status status)
+BaseCache::BaseCache(const std::string &name, Params &params)
+ : MemObject(name),
+ blocked(0), blockedSnoop(0),
+ blkSize(params.blkSize),
+ missCount(params.maxMisses), drainEvent(NULL)
{
- cache->recvStatusChange(status, isCpuSide);
}
+
void
-BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
+BaseCache::CachePort::recvStatusChange(Port::Status status)
{
- cache->getAddressRanges(resp, snoop, isCpuSide);
+ if (status == Port::RangeChange) {
+ otherPort->sendStatusChange(Port::RangeChange);
+ }
}
int
@@ -114,92 +115,99 @@ BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
sendFunctional(pkt);
}
+
void
-BaseCache::CachePort::recvRetry()
+BaseCache::CachePort::respond(PacketPtr pkt, Tick time)
{
- PacketPtr pkt;
- assert(waitingOnRetry);
- if (!drainList.empty()) {
- DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n"
- , name(), drainList.size());
- //We have some responses to drain first
- pkt = drainList.front();
- drainList.pop_front();
- if (sendTiming(pkt)) {
- DPRINTF(CachePort, "%s sucessful in sending a retry for"
- "response (%i still waiting)\n", name(), drainList.size());
- if (!drainList.empty() ||
- !isCpuSide && cache->doMasterRequest() ||
- isCpuSide && cache->doSlaveRequest()) {
-
- DPRINTF(CachePort, "%s has more responses/requests\n", name());
- new BaseCache::RequestEvent(this, curTick + 1);
- }
- waitingOnRetry = false;
- }
- else {
- drainList.push_front(pkt);
+ assert(time >= curTick);
+ if (pkt->needsResponse()) {
+ if (transmitList.empty()) {
+ assert(!responseEvent->scheduled());
+ responseEvent->schedule(time);
+ transmitList.push_back(std::pair<Tick,PacketPtr>(time,pkt));
+ return;
}
- // Check if we're done draining once this list is empty
- if (drainList.empty())
- cache->checkDrain();
- }
- else if (!isCpuSide)
- {
- DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
- if (!cache->doMasterRequest()) {
- //This can happen if I am the owner of a block and see an upgrade
- //while the block was in my WB Buffers. I just remove the
- //wb and de-assert the masterRequest
- waitingOnRetry = false;
+
+ // something is on the list and this belongs at the end
+ if (time >= transmitList.back().first) {
+ transmitList.push_back(std::pair<Tick,PacketPtr>(time,pkt));
return;
}
- pkt = cache->getPacket();
- MSHR* mshr = (MSHR*) pkt->senderState;
- //Copy the packet, it may be modified/destroyed elsewhere
- PacketPtr copyPkt = new Packet(*pkt);
- copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
- mshr->pkt = copyPkt;
-
- bool success = sendTiming(pkt);
- DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
- pkt->getAddr(), success ? "succesful" : "unsuccesful");
-
- waitingOnRetry = !success;
- if (waitingOnRetry) {
- DPRINTF(CachePort, "%s now waiting on a retry\n", name());
+ // Something is on the list and this belongs somewhere else
+ std::list<std::pair<Tick,PacketPtr> >::iterator i =
+ transmitList.begin();
+ std::list<std::pair<Tick,PacketPtr> >::iterator end =
+ transmitList.end();
+ bool done = false;
+
+ while (i != end && !done) {
+ if (time < i->first) {
+ if (i == transmitList.begin()) {
+                    //Inserting at beginning, reschedule
+ responseEvent->reschedule(time);
+ }
+ transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
+ done = true;
+ }
+ i++;
}
+ }
+ else {
+ assert(0);
+ // this code was on the cpuSidePort only... do we still need it?
+ if (pkt->cmd != MemCmd::UpgradeReq)
+ {
+ delete pkt->req;
+ delete pkt;
+ }
+ }
+}
- cache->sendResult(pkt, mshr, success);
+bool
+BaseCache::CachePort::drainResponse()
+{
+ DPRINTF(CachePort,
+ "%s attempting to send a retry for response (%i waiting)\n",
+ name(), drainList.size());
+ //We have some responses to drain first
+ PacketPtr pkt = drainList.front();
+ if (sendTiming(pkt)) {
+ drainList.pop_front();
+        DPRINTF(CachePort, "%s successful in sending a retry for "
+                "response (%i still waiting)\n", name(), drainList.size());
+ if (!drainList.empty() || isBusRequested()) {
- if (success && cache->doMasterRequest())
- {
- DPRINTF(CachePort, "%s has more requests\n", name());
- //Still more to issue, rerequest in 1 cycle
- new BaseCache::RequestEvent(this, curTick + 1);
+ DPRINTF(CachePort, "%s has more responses/requests\n", name());
+ return false;
}
+ } else {
+ waitingOnRetry = true;
+ DPRINTF(CachePort, "%s now waiting on a retry\n", name());
}
- else
- {
- assert(cache->doSlaveRequest());
- //pkt = cache->getCoherencePacket();
- //We save the packet, no reordering on CSHRS
- pkt = cache->getCoherencePacket();
- MSHR* cshr = (MSHR*)pkt->senderState;
- bool success = sendTiming(pkt);
- cache->sendCoherenceResult(pkt, cshr, success);
- waitingOnRetry = !success;
- if (success && cache->doSlaveRequest())
- {
- DPRINTF(CachePort, "%s has more requests\n", name());
- //Still more to issue, rerequest in 1 cycle
- new BaseCache::RequestEvent(this, curTick + 1);
+ return true;
+}
+
+
+bool
+BaseCache::CachePort::recvRetryCommon()
+{
+ assert(waitingOnRetry);
+ waitingOnRetry = false;
+ if (!drainList.empty()) {
+ if (!drainResponse()) {
+ // more responses to drain... re-request bus
+ scheduleRequestEvent(curTick + 1);
}
+ // Check if we're done draining once this list is empty
+ if (drainList.empty()) {
+ cache->checkDrain();
+ }
+ return true;
}
- if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
- else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
- return;
+ return false;
}
+
+
void
BaseCache::CachePort::setBlocked()
{
@@ -224,143 +232,6 @@ BaseCache::CachePort::clearBlocked()
}
}
-BaseCache::RequestEvent::RequestEvent(CachePort *_cachePort, Tick when)
- : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
-{
- this->setFlags(AutoDelete);
- schedule(when);
-}
-
-void
-BaseCache::RequestEvent::process()
-{
- if (cachePort->waitingOnRetry) return;
- //We have some responses to drain first
- if (!cachePort->drainList.empty()) {
- DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
- if (cachePort->sendTiming(cachePort->drainList.front())) {
- DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name());
- cachePort->drainList.pop_front();
- if (!cachePort->drainList.empty() ||
- !cachePort->isCpuSide && cachePort->cache->doMasterRequest() ||
- cachePort->isCpuSide && cachePort->cache->doSlaveRequest()) {
-
- DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
- this->schedule(curTick + 1);
- }
- }
- else {
- cachePort->waitingOnRetry = true;
- DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
- }
- }
- else if (!cachePort->isCpuSide)
- { //MSHR
- DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
- if (!cachePort->cache->doMasterRequest()) {
- //This can happen if I am the owner of a block and see an upgrade
- //while the block was in my WB Buffers. I just remove the
- //wb and de-assert the masterRequest
- return;
- }
-
- PacketPtr pkt = cachePort->cache->getPacket();
- MSHR* mshr = (MSHR*) pkt->senderState;
- //Copy the packet, it may be modified/destroyed elsewhere
- PacketPtr copyPkt = new Packet(*pkt);
- copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
- mshr->pkt = copyPkt;
-
- bool success = cachePort->sendTiming(pkt);
- DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
- pkt->getAddr(), success ? "succesful" : "unsuccesful");
-
- cachePort->waitingOnRetry = !success;
- if (cachePort->waitingOnRetry) {
- DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
- }
-
- cachePort->cache->sendResult(pkt, mshr, success);
- if (success && cachePort->cache->doMasterRequest())
- {
- DPRINTF(CachePort, "%s still more MSHR requests to send\n",
- cachePort->name());
- //Still more to issue, rerequest in 1 cycle
- this->schedule(curTick+1);
- }
- }
- else
- {
- //CSHR
- assert(cachePort->cache->doSlaveRequest());
- PacketPtr pkt = cachePort->cache->getCoherencePacket();
- MSHR* cshr = (MSHR*) pkt->senderState;
- bool success = cachePort->sendTiming(pkt);
- cachePort->cache->sendCoherenceResult(pkt, cshr, success);
- cachePort->waitingOnRetry = !success;
- if (cachePort->waitingOnRetry)
- DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
- if (success && cachePort->cache->doSlaveRequest())
- {
- DPRINTF(CachePort, "%s still more CSHR requests to send\n",
- cachePort->name());
- //Still more to issue, rerequest in 1 cycle
- this->schedule(curTick+1);
- }
- }
-}
-
-const char *
-BaseCache::RequestEvent::description()
-{
- return "Cache request event";
-}
-
-BaseCache::ResponseEvent::ResponseEvent(CachePort *_cachePort)
- : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
-{
-}
-
-void
-BaseCache::ResponseEvent::process()
-{
- assert(cachePort->transmitList.size());
- assert(cachePort->transmitList.front().first <= curTick);
- PacketPtr pkt = cachePort->transmitList.front().second;
- cachePort->transmitList.pop_front();
- if (!cachePort->transmitList.empty()) {
- Tick time = cachePort->transmitList.front().first;
- schedule(time <= curTick ? curTick+1 : time);
- }
-
- if (pkt->flags & NACKED_LINE)
- pkt->result = Packet::Nacked;
- else
- pkt->result = Packet::Success;
- pkt->makeTimingResponse();
- DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
- if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
- //Already have a list, just append
- cachePort->drainList.push_back(pkt);
- DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
- }
- else if (!cachePort->sendTiming(pkt)) {
- //It failed, save it to list of drain events
- DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
- cachePort->drainList.push_back(pkt);
- cachePort->waitingOnRetry = true;
- }
-
- // Check if we're done draining once this list is empty
- if (cachePort->drainList.empty() && cachePort->transmitList.empty())
- cachePort->cache->checkDrain();
-}
-
-const char *
-BaseCache::ResponseEvent::description()
-{
- return "Cache response event";
-}
void
BaseCache::init()
diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
index f06a79dc0..a27ac1788 100644
--- a/src/mem/cache/base_cache.hh
+++ b/src/mem/cache/base_cache.hh
@@ -26,6 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
+ * Steve Reinhardt
+ * Ron Dreslinski
*/
/**
@@ -83,17 +85,19 @@ class BaseCache : public MemObject
BaseCache *cache;
protected:
- CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
- virtual void recvStatusChange(Status status);
+ Event *responseEvent;
+
+ CachePort(const std::string &_name, BaseCache *_cache);
- virtual void getDeviceAddressRanges(AddrRangeList &resp,
- bool &snoop);
+ virtual void recvStatusChange(Status status);
virtual int deviceBlockSize();
- virtual void recvRetry();
+ bool recvRetryCommon();
public:
+ void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }
+
void setBlocked();
void clearBlocked();
@@ -104,65 +108,52 @@ class BaseCache : public MemObject
bool canDrain() { return drainList.empty() && transmitList.empty(); }
+ bool drainResponse();
+
+ CachePort *otherPort;
+
bool blocked;
bool mustSendRetry;
- bool isCpuSide;
-
bool waitingOnRetry;
+ /**
+ * Bit vector for the outstanding requests for the master interface.
+ */
+ uint8_t requestCauses;
+
std::list<PacketPtr> drainList;
std::list<std::pair<Tick,PacketPtr> > transmitList;
- };
- struct RequestEvent : public Event
- {
- CachePort *cachePort;
+ bool isBusRequested() { return requestCauses != 0; }
- RequestEvent(CachePort *_cachePort, Tick when);
- void process();
- const char *description();
- };
+ // These need to be virtual since the Event objects depend on
+ // cache template parameters.
+ virtual void scheduleRequestEvent(Tick t) = 0;
- struct ResponseEvent : public Event
- {
- CachePort *cachePort;
+ void requestBus(RequestCause cause, Tick time)
+ {
+ if (!isBusRequested() && !waitingOnRetry) {
+ scheduleRequestEvent(time);
+ }
+ requestCauses |= (1 << cause);
+ }
+
+ void deassertBusRequest(RequestCause cause)
+ {
+ requestCauses &= ~(1 << cause);
+ }
- ResponseEvent(CachePort *_cachePort);
- void process();
- const char *description();
+ void respond(PacketPtr pkt, Tick time);
};
public: //Made public so coherence can get at it.
CachePort *cpuSidePort;
CachePort *memSidePort;
- ResponseEvent *sendEvent;
- ResponseEvent *memSendEvent;
-
private:
- void recvStatusChange(Port::Status status, bool isCpuSide)
- {
- if (status == Port::RangeChange){
- if (!isCpuSide) {
- cpuSidePort->sendStatusChange(Port::RangeChange);
- }
- else {
- memSidePort->sendStatusChange(Port::RangeChange);
- }
- }
- }
-
- virtual PacketPtr getPacket() = 0;
-
- virtual PacketPtr getCoherencePacket() = 0;
-
- virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;
-
- virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;
-
/**
* Bit vector of the blocking reasons for the access path.
* @sa #BlockedCause
@@ -175,16 +166,6 @@ class BaseCache : public MemObject
*/
uint8_t blockedSnoop;
- /**
- * Bit vector for the outstanding requests for the master interface.
- */
- uint8_t masterRequests;
-
- /**
- * Bit vector for the outstanding requests for the slave interface.
- */
- uint8_t slaveRequests;
-
protected:
/** Stores time the cache blocked for statistics. */
@@ -309,20 +290,10 @@ class BaseCache : public MemObject
* of this cache.
* @param params The parameter object for this BaseCache.
*/
- BaseCache(const std::string &name, Params &params)
- : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
- slaveRequests(0), blkSize(params.blkSize),
- missCount(params.maxMisses), drainEvent(NULL)
- {
- //Start ports at null if more than one is created we should panic
- cpuSidePort = NULL;
- memSidePort = NULL;
- }
+ BaseCache(const std::string &name, Params &params);
~BaseCache()
{
- delete sendEvent;
- delete memSendEvent;
}
virtual void init();
@@ -422,12 +393,12 @@ class BaseCache : public MemObject
}
/**
- * True if the master bus should be requested.
+ * True if the memory-side bus should be requested.
* @return True if there are outstanding requests for the master bus.
*/
- bool doMasterRequest()
+ bool isMemSideBusRequested()
{
- return masterRequests != 0;
+ return memSidePort->isBusRequested();
}
/**
@@ -435,59 +406,18 @@ class BaseCache : public MemObject
* @param cause The reason for the request.
* @param time The time to make the request.
*/
- void setMasterRequest(RequestCause cause, Tick time)
+ void requestMemSideBus(RequestCause cause, Tick time)
{
- if (!doMasterRequest() && !memSidePort->waitingOnRetry)
- {
- new RequestEvent(memSidePort, time);
- }
- uint8_t flag = 1<<cause;
- masterRequests |= flag;
+ memSidePort->requestBus(cause, time);
}
/**
* Clear the master bus request for the given cause.
* @param cause The request reason to clear.
*/
- void clearMasterRequest(RequestCause cause)
- {
- uint8_t flag = 1<<cause;
- masterRequests &= ~flag;
- checkDrain();
- }
-
- /**
- * Return true if the slave bus should be requested.
- * @return True if there are outstanding requests for the slave bus.
- */
- bool doSlaveRequest()
- {
- return slaveRequests != 0;
- }
-
- /**
- * Request the slave bus for the given reason and time.
- * @param cause The reason for the request.
- * @param time The time to make the request.
- */
- void setSlaveRequest(RequestCause cause, Tick time)
- {
- if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
- {
- new RequestEvent(cpuSidePort, time);
- }
- uint8_t flag = 1<<cause;
- slaveRequests |= flag;
- }
-
- /**
- * Clear the slave bus request for the given reason.
- * @param cause The request reason to clear.
- */
- void clearSlaveRequest(RequestCause cause)
+ void deassertMemSideBusRequest(RequestCause cause)
{
- uint8_t flag = 1<<cause;
- slaveRequests &= ~flag;
+ memSidePort->deassertBusRequest(cause);
checkDrain();
}
@@ -498,111 +428,7 @@ class BaseCache : public MemObject
*/
void respond(PacketPtr pkt, Tick time)
{
- assert(time >= curTick);
- if (pkt->needsResponse()) {
-/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
- reqCpu->schedule(time);
-*/
- if (cpuSidePort->transmitList.empty()) {
- assert(!sendEvent->scheduled());
- sendEvent->schedule(time);
- cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
- (time,pkt));
- return;
- }
-
- // something is on the list and this belongs at the end
- if (time >= cpuSidePort->transmitList.back().first) {
- cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
- (time,pkt));
- return;
- }
- // Something is on the list and this belongs somewhere else
- std::list<std::pair<Tick,PacketPtr> >::iterator i =
- cpuSidePort->transmitList.begin();
- std::list<std::pair<Tick,PacketPtr> >::iterator end =
- cpuSidePort->transmitList.end();
- bool done = false;
-
- while (i != end && !done) {
- if (time < i->first) {
- if (i == cpuSidePort->transmitList.begin()) {
- //Inserting at begining, reschedule
- sendEvent->reschedule(time);
- }
- cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
- (time,pkt));
- done = true;
- }
- i++;
- }
- }
- else {
- if (pkt->cmd != MemCmd::UpgradeReq)
- {
- delete pkt->req;
- delete pkt;
- }
- }
- }
-
- /**
- * Send a reponse to the slave interface and calculate miss latency.
- * @param pkt The request to respond to.
- * @param time The time the response is ready.
- */
- void respondToMiss(PacketPtr pkt, Tick time)
- {
- assert(time >= curTick);
- if (!pkt->req->isUncacheable()) {
- missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
- time - pkt->time;
- }
- if (pkt->needsResponse()) {
-/* CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
- reqCpu->schedule(time);
-*/
- if (cpuSidePort->transmitList.empty()) {
- assert(!sendEvent->scheduled());
- sendEvent->schedule(time);
- cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
- (time,pkt));
- return;
- }
-
- // something is on the list and this belongs at the end
- if (time >= cpuSidePort->transmitList.back().first) {
- cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
- (time,pkt));
- return;
- }
- // Something is on the list and this belongs somewhere else
- std::list<std::pair<Tick,PacketPtr> >::iterator i =
- cpuSidePort->transmitList.begin();
- std::list<std::pair<Tick,PacketPtr> >::iterator end =
- cpuSidePort->transmitList.end();
- bool done = false;
-
- while (i != end && !done) {
- if (time < i->first) {
- if (i == cpuSidePort->transmitList.begin()) {
- //Inserting at begining, reschedule
- sendEvent->reschedule(time);
- }
- cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
- (time,pkt));
- done = true;
- }
- i++;
- }
- }
- else {
- if (pkt->cmd != MemCmd::UpgradeReq)
- {
- delete pkt->req;
- delete pkt;
- }
- }
+ cpuSidePort->respond(pkt, time);
}
/**
@@ -611,64 +437,7 @@ class BaseCache : public MemObject
*/
void respondToSnoop(PacketPtr pkt, Tick time)
{
- assert(time >= curTick);
- assert (pkt->needsResponse());
-/* CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
- reqMem->schedule(time);
-*/
- if (memSidePort->transmitList.empty()) {
- assert(!memSendEvent->scheduled());
- memSendEvent->schedule(time);
- memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
- (time,pkt));
- return;
- }
-
- // something is on the list and this belongs at the end
- if (time >= memSidePort->transmitList.back().first) {
- memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
- (time,pkt));
- return;
- }
- // Something is on the list and this belongs somewhere else
- std::list<std::pair<Tick,PacketPtr> >::iterator i =
- memSidePort->transmitList.begin();
- std::list<std::pair<Tick,PacketPtr> >::iterator end =
- memSidePort->transmitList.end();
- bool done = false;
-
- while (i != end && !done) {
- if (time < i->first) {
- if (i == memSidePort->transmitList.begin()) {
- //Inserting at begining, reschedule
- memSendEvent->reschedule(time);
- }
- memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
- done = true;
- }
- i++;
- }
- }
-
- /**
- * Notification from master interface that a address range changed. Nothing
- * to do for a cache.
- */
- void rangeChange() {}
-
- void getAddressRanges(AddrRangeList &resp, bool &snoop, bool isCpuSide)
- {
- if (isCpuSide)
- {
- bool dummy;
- memSidePort->getPeerAddressRanges(resp, dummy);
- }
- else
- {
- //This is where snoops get updated
- AddrRangeList dummy;
- snoop = true;
- }
+ memSidePort->respond(pkt, time);
}
virtual unsigned int drain(Event *de);
@@ -685,7 +454,7 @@ class BaseCache : public MemObject
bool canDrain()
{
- if (doMasterRequest() || doSlaveRequest()) {
+ if (isMemSideBusRequested()) {
return false;
} else if (memSidePort && !memSidePort->canDrain()) {
return false;
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index cb4e7f62e..2b4e7b9c8 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -61,7 +61,6 @@
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
-#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"
#include "mem/cache/cache_impl.hh"
@@ -72,27 +71,22 @@
#if defined(USE_CACHE_FALRU)
template class Cache<FALRU, SimpleCoherence>;
-template class Cache<FALRU, UniCoherence>;
#endif
#if defined(USE_CACHE_IIC)
template class Cache<IIC, SimpleCoherence>;
-template class Cache<IIC, UniCoherence>;
#endif
#if defined(USE_CACHE_LRU)
template class Cache<LRU, SimpleCoherence>;
-template class Cache<LRU, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT)
template class Cache<Split, SimpleCoherence>;
-template class Cache<Split, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
template class Cache<SplitLIFO, SimpleCoherence>;
-template class Cache<SplitLIFO, UniCoherence>;
#endif
#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 3e45c85d2..e14b2efe8 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -28,6 +28,7 @@
* Authors: Erik Hallnor
* Dave Greene
* Steve Reinhardt
+ * Ron Dreslinski
*/
/**
@@ -46,6 +47,8 @@
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/miss_buffer.hh"
+#include "sim/eventq.hh"
+
//Forward decleration
class MSHR;
class BasePrefetcher;
@@ -83,11 +86,29 @@ class Cache : public BaseCache
return static_cast<Cache<TagStore,Coherence> *>(cache);
}
+ void processRequestEvent();
+ void processResponseEvent();
+
+ virtual void getDeviceAddressRanges(AddrRangeList &resp,
+ bool &snoop);
+
virtual bool recvTiming(PacketPtr pkt);
+ virtual void recvRetry();
+
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
+
+ typedef EventWrapper<CpuSidePort, &CpuSidePort::processResponseEvent>
+ ResponseEvent;
+
+ typedef EventWrapper<CpuSidePort, &CpuSidePort::processRequestEvent>
+ RequestEvent;
+
+ virtual void scheduleRequestEvent(Tick t) {
+ new RequestEvent(this, t);
+ }
};
class MemSidePort : public CachePort
@@ -103,11 +124,29 @@ class Cache : public BaseCache
return static_cast<Cache<TagStore,Coherence> *>(cache);
}
+ void processRequestEvent();
+ void processResponseEvent();
+
+ virtual void getDeviceAddressRanges(AddrRangeList &resp,
+ bool &snoop);
+
virtual bool recvTiming(PacketPtr pkt);
+ virtual void recvRetry();
+
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
+
+ typedef EventWrapper<MemSidePort, &MemSidePort::processResponseEvent>
+ ResponseEvent;
+
+ typedef EventWrapper<MemSidePort, &MemSidePort::processRequestEvent>
+ RequestEvent;
+
+ virtual void scheduleRequestEvent(Tick t) {
+ new RequestEvent(this, t);
+ }
};
/** Tag and data Storage */
@@ -339,8 +378,6 @@ class Cache : public BaseCache
virtual Port *getPort(const std::string &if_name, int idx = -1);
virtual void deletePortRefs(Port *p);
- virtual void recvStatusChange(Port::Status status, bool isCpuSide);
-
void regStats();
/**
@@ -354,21 +391,14 @@ class Cache : public BaseCache
* Selects a request to send on the bus.
* @return The memory request to service.
*/
- virtual PacketPtr getPacket();
+ PacketPtr getPacket();
/**
* Was the request was sent successfully?
* @param pkt The request.
* @param success True if the request was sent successfully.
*/
- virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success);
-
- /**
- * Was the CSHR request was sent successfully?
- * @param pkt The request.
- * @param success True if the request was sent successfully.
- */
- virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* cshr, bool success);
+ void sendResult(PacketPtr &pkt, MSHR* mshr, bool success);
/**
* Handles a response (cache line fill/write ack) from the bus.
@@ -377,12 +407,6 @@ class Cache : public BaseCache
void handleResponse(PacketPtr &pkt);
/**
- * Selects a coherence message to forward to lower levels of the hierarchy.
- * @return The coherence message to forward.
- */
- virtual PacketPtr getCoherencePacket();
-
- /**
* Snoops bus transactions to maintain coherence.
* @param pkt The current bus transaction.
*/
diff --git a/src/mem/cache/cache_builder.cc b/src/mem/cache/cache_builder.cc
index e887f711e..bc1a8a775 100644
--- a/src/mem/cache/cache_builder.cc
+++ b/src/mem/cache/cache_builder.cc
@@ -75,7 +75,6 @@
#include "mem/cache/miss/blocking_buffer.hh"
// Coherence Templates
-#include "mem/cache/coherence/uni_coherence.hh"
#include "mem/cache/coherence/simple_coherence.hh"
//Prefetcher Headers
@@ -302,13 +301,8 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache)
} while (0)
#define BUILD_COHERENCE(b) do { \
- if (protocol == NULL) { \
- UniCoherence *coh = new UniCoherence(); \
- BUILD_CACHES(UniCoherence); \
- } else { \
- SimpleCoherence *coh = new SimpleCoherence(protocol); \
- BUILD_CACHES(SimpleCoherence); \
- } \
+ SimpleCoherence *coh = new SimpleCoherence(protocol); \
+ BUILD_CACHES(SimpleCoherence); \
} while (0)
#if defined(USE_TAGGED)
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 9b094c1e3..a7f96603e 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -28,6 +28,8 @@
* Authors: Erik Hallnor
* Dave Greene
* Nathan Binkert
+ * Steve Reinhardt
+ * Ron Dreslinski
*/
/**
@@ -57,18 +59,8 @@
bool SIGNAL_NACK_HACK;
template<class TagStore, class Coherence>
-void
-Cache<TagStore,Coherence>::
-recvStatusChange(Port::Status status, bool isCpuSide)
-{
-
-}
-
-
-template<class TagStore, class Coherence>
-Cache<TagStore,Coherence>::
-Cache(const std::string &_name,
- Cache<TagStore,Coherence>::Params &params)
+Cache<TagStore,Coherence>::Cache(const std::string &_name,
+ Cache<TagStore,Coherence>::Params &params)
: BaseCache(_name, params.baseParams),
prefetchAccess(params.prefetchAccess),
tags(params.tags), missQueue(params.missQueue),
@@ -84,6 +76,11 @@ Cache(const std::string &_name,
adaptiveCompression(params.adaptiveCompression),
writebackCompressed(params.writebackCompressed)
{
+ cpuSidePort = new CpuSidePort(_name + "-cpu_side_port", this);
+ memSidePort = new MemSidePort(_name + "-mem_side_port", this);
+ cpuSidePort->setOtherPort(memSidePort);
+ memSidePort->setOtherPort(cpuSidePort);
+
tags->setCache(this);
missQueue->setCache(this);
missQueue->setPrefetcher(prefetcher);
@@ -406,7 +403,11 @@ Cache<TagStore,Coherence>::handleFill(BlkType *blk, MSHR * mshr,
// mshr->pkt = pkt;
break;
}
- respondToMiss(target, completion_time);
+ if (!target->req->isUncacheable()) {
+ missLatency[target->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
+ completion_time - target->time;
+ }
+ respond(target, completion_time);
mshr->popTarget();
}
@@ -688,7 +689,7 @@ Cache<TagStore,Coherence>::getPacket()
}
}
- assert(!doMasterRequest() || missQueue->havePending());
+ assert(!isMemSideBusRequested() || missQueue->havePending());
assert(!pkt || pkt->time <= curTick);
SIGNAL_NACK_HACK = false;
return pkt;
@@ -727,7 +728,6 @@ Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
pkt->flags &= ~NACKED_LINE;
SIGNAL_NACK_HACK = false;
pkt->flags &= ~SATISFIED;
- pkt->flags &= ~SNOOP_COMMIT;
//Rmove copy from mshr
delete mshr->pkt;
@@ -783,22 +783,6 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
}
}
-template<class TagStore, class Coherence>
-PacketPtr
-Cache<TagStore,Coherence>::getCoherencePacket()
-{
- return coherence->getPacket();
-}
-
-template<class TagStore, class Coherence>
-void
-Cache<TagStore,Coherence>::sendCoherenceResult(PacketPtr &pkt,
- MSHR *cshr,
- bool success)
-{
- coherence->sendResult(pkt, cshr, success);
-}
-
template<class TagStore, class Coherence>
void
@@ -810,14 +794,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
return;
}
- //Send a timing (true) invalidate up if the protocol calls for it
- if (coherence->propogateInvalidate(pkt, true)) {
- //Temp hack, we had a functional read hit in the L1, mark as success
- pkt->flags |= SATISFIED;
- pkt->result = Packet::Success;
- respondToSnoop(pkt, curTick + hitLatency);
- return;
- }
+ ///// PROPAGATE SNOOP UPWARD HERE
Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
BlkType *blk = tags->findBlock(pkt->getAddr());
@@ -1113,13 +1090,7 @@ template<class TagStore, class Coherence>
Tick
Cache<TagStore,Coherence>::snoopProbe(PacketPtr &pkt)
{
- //Send a atomic (false) invalidate up if the protocol calls for it
- if (coherence->propogateInvalidate(pkt, false)) {
- //Temp hack, we had a functional read hit in the L1, mark as success
- pkt->flags |= SATISFIED;
- pkt->result = Packet::Success;
- return hitLatency;
- }
+ ///// PROPAGATE SNOOP UPWARD HERE
Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
BlkType *blk = tags->findBlock(pkt->getAddr());
@@ -1146,27 +1117,15 @@ template<class TagStore, class Coherence>
Port *
Cache<TagStore,Coherence>::getPort(const std::string &if_name, int idx)
{
- if (if_name == "" || if_name == "cpu_side")
- {
- if (cpuSidePort == NULL) {
- cpuSidePort = new CpuSidePort(name() + "-cpu_side_port", this);
- sendEvent = new ResponseEvent(cpuSidePort);
- }
+ if (if_name == "" || if_name == "cpu_side") {
return cpuSidePort;
- }
- else if (if_name == "functional")
- {
- return new CpuSidePort(name() + "-cpu_side_funcport", this);
- }
- else if (if_name == "mem_side")
- {
- if (memSidePort != NULL)
- panic("Already have a mem side for this cache\n");
- memSidePort = new MemSidePort(name() + "-mem_side_port", this);
- memSendEvent = new ResponseEvent(memSidePort);
+ } else if (if_name == "mem_side") {
return memSidePort;
+ } else if (if_name == "functional") {
+ return new CpuSidePort(name() + "-cpu_side_funcport", this);
+ } else {
+ panic("Port name %s unrecognized\n", if_name);
}
- else panic("Port name %s unrecognized\n", if_name);
}
template<class TagStore, class Coherence>
@@ -1181,6 +1140,18 @@ Cache<TagStore,Coherence>::deletePortRefs(Port *p)
template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::CpuSidePort::
+getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
+{
+ // CPU side port doesn't snoop; it's a target only.
+ bool dummy;
+ otherPort->getPeerAddressRanges(resp, dummy);
+ snoop = false;
+}
+
+
+template<class TagStore, class Coherence>
bool
Cache<TagStore,Coherence>::CpuSidePort::recvTiming(PacketPtr pkt)
{
@@ -1213,6 +1184,68 @@ Cache<TagStore,Coherence>::CpuSidePort::recvTiming(PacketPtr pkt)
return true;
}
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::CpuSidePort::recvRetry()
+{
+ recvRetryCommon();
+}
+
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::CpuSidePort::processRequestEvent()
+{
+ if (waitingOnRetry)
+ return;
+ //We have some responses to drain first
+ if (!drainList.empty()) {
+ if (!drainResponse()) {
+ // more responses to drain... re-request bus
+ scheduleRequestEvent(curTick + 1);
+ }
+ }
+}
+
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::CpuSidePort::processResponseEvent()
+{
+ assert(transmitList.size());
+ assert(transmitList.front().first <= curTick);
+ PacketPtr pkt = transmitList.front().second;
+ transmitList.pop_front();
+ if (!transmitList.empty()) {
+ Tick time = transmitList.front().first;
+ responseEvent->schedule(time <= curTick ? curTick+1 : time);
+ }
+
+ if (pkt->flags & NACKED_LINE)
+ pkt->result = Packet::Nacked;
+ else
+ pkt->result = Packet::Success;
+ pkt->makeTimingResponse();
+ DPRINTF(CachePort, "%s attempting to send a response\n", name());
+ if (!drainList.empty() || waitingOnRetry) {
+ //Already have a list, just append
+ drainList.push_back(pkt);
+ DPRINTF(CachePort, "%s appending response onto drain list\n", name());
+ }
+ else if (!sendTiming(pkt)) {
+ //It failed, save it to list of drain events
+ DPRINTF(CachePort, "%s now waiting for a retry\n", name());
+ drainList.push_back(pkt);
+ waitingOnRetry = true;
+ }
+
+ // Check if we're done draining once this list is empty
+ if (drainList.empty() && transmitList.empty())
+ myCache()->checkDrain();
+}
+
+
template<class TagStore, class Coherence>
Tick
Cache<TagStore,Coherence>::CpuSidePort::recvAtomic(PacketPtr pkt)
@@ -1240,6 +1273,18 @@ Cache<TagStore,Coherence>::CpuSidePort::recvFunctional(PacketPtr pkt)
template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::MemSidePort::
+getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
+{
+ // Memory-side port always snoops.
+ bool dummy;
+ otherPort->getPeerAddressRanges(resp, dummy);
+ snoop = true;
+}
+
+
+template<class TagStore, class Coherence>
bool
Cache<TagStore,Coherence>::MemSidePort::recvTiming(PacketPtr pkt)
{
@@ -1249,24 +1294,150 @@ Cache<TagStore,Coherence>::MemSidePort::recvTiming(PacketPtr pkt)
if (pkt->result == Packet::Nacked)
panic("Need to implement cache resending nacked packets!\n");
- if (pkt->isRequest() && blocked)
- {
+ if (pkt->isRequest() && blocked) {
DPRINTF(Cache,"Scheduling a retry while blocked\n");
mustSendRetry = true;
return false;
}
- if (pkt->isResponse())
+ if (pkt->isResponse()) {
myCache()->handleResponse(pkt);
- else {
- //Check if we should do the snoop
- if (pkt->flags & SNOOP_COMMIT)
- myCache()->snoop(pkt);
+ } else {
+ myCache()->snoop(pkt);
}
return true;
}
template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::MemSidePort::recvRetry()
+{
+ if (recvRetryCommon()) {
+ return;
+ }
+
+ DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
+ if (!cache->isMemSideBusRequested()) {
+ //This can happen if I am the owner of a block and see an upgrade
+ //while the block was in my WB Buffers. I just remove the
+ //wb and de-assert the masterRequest
+ waitingOnRetry = false;
+ return;
+ }
+ PacketPtr pkt = myCache()->getPacket();
+ MSHR* mshr = (MSHR*) pkt->senderState;
+ //Copy the packet, it may be modified/destroyed elsewhere
+ PacketPtr copyPkt = new Packet(*pkt);
+ copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
+ mshr->pkt = copyPkt;
+
+ bool success = sendTiming(pkt);
+ DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
+            pkt->getAddr(), success ? "successful" : "unsuccessful");
+
+ waitingOnRetry = !success;
+ if (waitingOnRetry) {
+ DPRINTF(CachePort, "%s now waiting on a retry\n", name());
+ }
+
+ myCache()->sendResult(pkt, mshr, success);
+
+ if (success && cache->isMemSideBusRequested())
+ {
+ DPRINTF(CachePort, "%s has more requests\n", name());
+ //Still more to issue, rerequest in 1 cycle
+ new RequestEvent(this, curTick + 1);
+ }
+}
+
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::MemSidePort::processRequestEvent()
+{
+ if (waitingOnRetry)
+ return;
+ //We have some responses to drain first
+ if (!drainList.empty()) {
+ if (!drainResponse()) {
+ // more responses to drain... re-request bus
+ scheduleRequestEvent(curTick + 1);
+ }
+ return;
+ }
+
+ DPRINTF(CachePort, "%s trying to send a MSHR request\n", name());
+ if (!isBusRequested()) {
+ //This can happen if I am the owner of a block and see an upgrade
+ //while the block was in my WB Buffers. I just remove the
+ //wb and de-assert the masterRequest
+ return;
+ }
+
+ PacketPtr pkt = myCache()->getPacket();
+ MSHR* mshr = (MSHR*) pkt->senderState;
+ //Copy the packet, it may be modified/destroyed elsewhere
+ PacketPtr copyPkt = new Packet(*pkt);
+ copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
+ mshr->pkt = copyPkt;
+
+ bool success = sendTiming(pkt);
+ DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
+            pkt->getAddr(), success ? "successful" : "unsuccessful");
+
+ waitingOnRetry = !success;
+ if (waitingOnRetry) {
+ DPRINTF(CachePort, "%s now waiting on a retry\n", name());
+ }
+
+ myCache()->sendResult(pkt, mshr, success);
+ if (success && isBusRequested())
+ {
+ DPRINTF(CachePort, "%s still more MSHR requests to send\n", name());
+ //Still more to issue, rerequest in 1 cycle
+ scheduleRequestEvent(curTick+1);
+ }
+}
+
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::MemSidePort::processResponseEvent()
+{
+ assert(transmitList.size());
+ assert(transmitList.front().first <= curTick);
+ PacketPtr pkt = transmitList.front().second;
+ transmitList.pop_front();
+ if (!transmitList.empty()) {
+ Tick time = transmitList.front().first;
+ responseEvent->schedule(time <= curTick ? curTick+1 : time);
+ }
+
+ if (pkt->flags & NACKED_LINE)
+ pkt->result = Packet::Nacked;
+ else
+ pkt->result = Packet::Success;
+ pkt->makeTimingResponse();
+ DPRINTF(CachePort, "%s attempting to send a response\n", name());
+ if (!drainList.empty() || waitingOnRetry) {
+ //Already have a list, just append
+ drainList.push_back(pkt);
+ DPRINTF(CachePort, "%s appending response onto drain list\n", name());
+ }
+ else if (!sendTiming(pkt)) {
+ //It failed, save it to list of drain events
+ DPRINTF(CachePort, "%s now waiting for a retry\n", name());
+ drainList.push_back(pkt);
+ waitingOnRetry = true;
+ }
+
+ // Check if we're done draining once this list is empty
+ if (drainList.empty() && transmitList.empty())
+ myCache()->checkDrain();
+}
+
+
+template<class TagStore, class Coherence>
Tick
Cache<TagStore,Coherence>::MemSidePort::recvAtomic(PacketPtr pkt)
{
@@ -1292,15 +1463,17 @@ template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
CpuSidePort::CpuSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache)
- : BaseCache::CachePort(_name, _cache, true)
+ : BaseCache::CachePort(_name, _cache)
{
+ responseEvent = new ResponseEvent(this);
}
template<class TagStore, class Coherence>
Cache<TagStore,Coherence>::
MemSidePort::MemSidePort(const std::string &_name,
Cache<TagStore,Coherence> *_cache)
- : BaseCache::CachePort(_name, _cache, false)
+ : BaseCache::CachePort(_name, _cache)
{
+ responseEvent = new ResponseEvent(this);
}
diff --git a/src/mem/cache/coherence/SConscript b/src/mem/cache/coherence/SConscript
index 4f5966140..91720b20e 100644
--- a/src/mem/cache/coherence/SConscript
+++ b/src/mem/cache/coherence/SConscript
@@ -33,5 +33,4 @@ Import('*')
SimObject('CoherenceProtocol.py')
Source('coherence_protocol.cc')
-Source('uni_coherence.cc')
diff --git a/src/mem/cache/coherence/coherence_protocol.cc b/src/mem/cache/coherence/coherence_protocol.cc
index 33a8a4e63..bc8de0d26 100644
--- a/src/mem/cache/coherence/coherence_protocol.cc
+++ b/src/mem/cache/coherence/coherence_protocol.cc
@@ -295,11 +295,14 @@ CoherenceProtocol::CoherenceProtocol(const string &name,
tt[Invalid][MC::ReadReq].onRequest(MC::ReadReq);
// we only support write allocate right now
tt[Invalid][MC::WriteReq].onRequest(MC::ReadExReq);
+ tt[Invalid][MC::ReadExReq].onRequest(MC::ReadExReq);
tt[Invalid][MC::SwapReq].onRequest(MC::ReadExReq);
tt[Shared][MC::WriteReq].onRequest(writeToSharedCmd);
+ tt[Shared][MC::ReadExReq].onRequest(MC::ReadExReq);
tt[Shared][MC::SwapReq].onRequest(writeToSharedCmd);
if (hasOwned) {
tt[Owned][MC::WriteReq].onRequest(writeToSharedCmd);
+ tt[Owned][MC::ReadExReq].onRequest(MC::ReadExReq);
tt[Owned][MC::SwapReq].onRequest(writeToSharedCmd);
}
diff --git a/src/mem/cache/coherence/simple_coherence.hh b/src/mem/cache/coherence/simple_coherence.hh
index 1c89c703a..095260ca4 100644
--- a/src/mem/cache/coherence/simple_coherence.hh
+++ b/src/mem/cache/coherence/simple_coherence.hh
@@ -161,12 +161,6 @@ class SimpleCoherence
bool allowFastWrites() { return false; }
bool hasProtocol() { return true; }
-
- bool propogateInvalidate(PacketPtr pkt, bool isTiming)
- {
- //For now we do nothing, asssumes simple coherence is top level of cache
- return false;
- }
};
#endif //__SIMPLE_COHERENCE_HH__
diff --git a/src/mem/cache/coherence/uni_coherence.cc b/src/mem/cache/coherence/uni_coherence.cc
deleted file mode 100644
index 6061c89c3..000000000
--- a/src/mem/cache/coherence/uni_coherence.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-#include "mem/cache/coherence/uni_coherence.hh"
-#include "mem/cache/base_cache.hh"
-
-#include "base/trace.hh"
-
-using namespace std;
-
-UniCoherence::UniCoherence()
- : cshrs(50)
-{
-}
-
-PacketPtr
-UniCoherence::getPacket()
-{
- PacketPtr pkt = cshrs.getReq();
- return pkt;
-}
-
-void
-UniCoherence::sendResult(PacketPtr &pkt, MSHR* cshr, bool success)
-{
- if (success)
- {
- bool unblock = cshrs.isFull();
-// cshrs.markInService(cshr);
- delete pkt->req;
- cshrs.deallocate(cshr);
- if (!cshrs.havePending()) {
- cache->clearSlaveRequest(Request_Coherence);
- }
- if (unblock) {
- //since CSHRs are always used as buffers, should always get rid of one
- assert(!cshrs.isFull());
- cache->clearBlocked(Blocked_Coherence);
- }
- }
-}
-
-
-/**
- * @todo add support for returning slave requests, not doing them here.
- */
-bool
-UniCoherence::handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
- CacheBlk::State &new_state)
-{
- new_state = 0;
- if (pkt->isInvalidate()) {
- DPRINTF(Cache, "snoop inval on blk %x (blk ptr %x)\n",
- pkt->getAddr(), blk);
- }
- else if (blk) {
- new_state = blk->status;
- if (pkt->isRead()) {
- DPRINTF(Cache, "Uni-coherence snoops a read that hit in itself"
- ". Should satisfy the packet\n");
- return true; //Satisfy Reads if we can
- }
- }
- return false;
-}
-
-bool
-UniCoherence::propogateInvalidate(PacketPtr pkt, bool isTiming)
-{
- if (pkt->isInvalidate()) {
-/* Temp Fix for now, forward all invalidates up as functional accesses */
- if (isTiming) {
- // Forward to other caches
- Request* req = new Request(pkt->req->getPaddr(), pkt->getSize(), 0);
- PacketPtr tmp = new Packet(req, MemCmd::InvalidateReq, -1);
- cshrs.allocate(tmp);
- cache->setSlaveRequest(Request_Coherence, curTick);
- if (cshrs.isFull())
- cache->setBlockedForSnoop(Blocked_Coherence);
- }
- else {
- PacketPtr tmp = new Packet(pkt->req, MemCmd::InvalidateReq, -1);
- cache->cpuSidePort->sendAtomic(tmp);
- delete tmp;
- }
-/**/
-/* PacketPtr tmp = new Packet(pkt->req, MemCmd::InvalidateReq, -1);
- cache->cpuSidePort->sendFunctional(tmp);
- delete tmp;
-*/
- }
- if (pkt->isRead()) {
- /*For now we will see if someone above us has the data by
- doing a functional access on reads. Fix this later */
- PacketPtr tmp = new Packet(pkt->req, MemCmd::ReadReq, -1);
- tmp->allocate();
- cache->cpuSidePort->sendFunctional(tmp);
- bool hit = (tmp->result == Packet::Success);
- if (hit) {
- memcpy(pkt->getPtr<uint8_t>(), tmp->getPtr<uint8_t>(),
- pkt->getSize());
- DPRINTF(Cache, "Uni-coherence snoops a read that hit in L1\n");
- }
- delete tmp;
- return hit;
- }
- return false;
-}
diff --git a/src/mem/cache/coherence/uni_coherence.hh b/src/mem/cache/coherence/uni_coherence.hh
deleted file mode 100644
index 9efb4e192..000000000
--- a/src/mem/cache/coherence/uni_coherence.hh
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-#ifndef __UNI_COHERENCE_HH__
-#define __UNI_COHERENCE_HH__
-
-#include "base/trace.hh"
-#include "base/misc.hh"
-#include "mem/cache/cache_blk.hh"
-#include "mem/cache/miss/mshr_queue.hh"
-#include "mem/packet.hh"
-
-class BaseCache;
-
-class UniCoherence
-{
- protected:
- /** Buffers to hold forwarded invalidates. */
- MSHRQueue cshrs;
- /** Pointer to the parent cache. */
- BaseCache *cache;
-
- public:
- /**
- * Construct and initialize this coherence policy.
- */
- UniCoherence();
-
- /**
- * Set the pointer to the parent cache.
- * @param _cache The parent cache.
- */
- void setCache(BaseCache *_cache)
- {
- cache = _cache;
- }
-
- /**
- * Register statistics.
- * @param name The name to prepend to stat descriptions.
- */
- void regStats(const std::string &name)
- {
- }
-
- /**
- * Return Read.
- * @param cmd The request's command.
- * @param state The current state of the cache block.
- * @return The proper bus command, as determined by the protocol.
- * @todo Make changes so writebacks don't get here.
- */
- MemCmd getBusCmd(MemCmd cmd, CacheBlk::State state)
- {
- if (cmd == MemCmd::HardPFReq && state)
- warn("Trying to issue a prefetch to a block we already have\n");
- if (cmd == MemCmd::Writeback)
- return MemCmd::Writeback;
- return MemCmd::ReadReq;
- }
-
- /**
- * Just return readable and writeable.
- * @param pkt The bus response.
- * @param current The current block state.
- * @return The new state.
- */
- CacheBlk::State getNewState(PacketPtr &pkt, CacheBlk::State current)
- {
- if (pkt->senderState) //Blocking Buffers don't get mshrs
- {
- if (((MSHR *)(pkt->senderState))->originalCmd == MemCmd::HardPFReq) {
- DPRINTF(HWPrefetch, "Marking a hardware prefetch as such in the state\n");
- return BlkHWPrefetched | BlkValid | BlkWritable;
- }
- else {
- return BlkValid | BlkWritable;
- }
- }
- //@todo What about prefetching with blocking buffers
- else
- return BlkValid | BlkWritable;
- }
-
- /**
- * Return outstanding invalidate to forward.
- * @return The next invalidate to forward to lower levels of cache.
- */
- PacketPtr getPacket();
-
- /**
- * Was the CSHR request was sent successfully?
- * @param pkt The request.
- * @param success True if the request was sent successfully.
- */
- void sendResult(PacketPtr &pkt, MSHR* cshr, bool success);
-
- /**
- * Handle snooped bus requests.
- * @param pkt The snooped bus request.
- * @param blk The cache block corresponding to the request, if any.
- * @param mshr The MSHR corresponding to the request, if any.
- * @param new_state The new coherence state of the block.
- * @return True if the request should be satisfied locally.
- */
- bool handleBusRequest(PacketPtr &pkt, CacheBlk *blk, MSHR *mshr,
- CacheBlk::State &new_state);
-
- /**
- * Return true if this coherence policy can handle fast cache writes.
- */
- bool allowFastWrites() { return true; }
-
- bool hasProtocol() { return false; }
-
- bool propogateInvalidate(PacketPtr pkt, bool isTiming);
-};
-
-#endif //__UNI_COHERENCE_HH__
diff --git a/src/mem/cache/miss/blocking_buffer.cc b/src/mem/cache/miss/blocking_buffer.cc
index e8ff26880..281328c2e 100644
--- a/src/mem/cache/miss/blocking_buffer.cc
+++ b/src/mem/cache/miss/blocking_buffer.cc
@@ -64,7 +64,7 @@ BlockingBuffer::handleMiss(PacketPtr &pkt, int blk_size, Tick time)
std::memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), blk_size);
cache->setBlocked(Blocked_NoWBBuffers);
- cache->setMasterRequest(Request_WB, time);
+ cache->requestMemSideBus(Request_WB, time);
return;
}
@@ -77,7 +77,7 @@ BlockingBuffer::handleMiss(PacketPtr &pkt, int blk_size, Tick time)
miss.pkt->flags |= CACHE_LINE_FILL;
}
cache->setBlocked(Blocked_NoMSHRs);
- cache->setMasterRequest(Request_MSHR, time);
+ cache->requestMemSideBus(Request_MSHR, time);
}
PacketPtr
@@ -111,7 +111,7 @@ BlockingBuffer::markInService(PacketPtr &pkt, MSHR* mshr)
// Forwarding a write/ writeback, don't need to change
// the command
assert(mshr == &wb);
- cache->clearMasterRequest(Request_WB);
+ cache->deassertMemSideBusRequest(Request_WB);
if (!pkt->needsResponse()) {
assert(wb.getNumTargets() == 0);
wb.deallocate();
@@ -121,7 +121,7 @@ BlockingBuffer::markInService(PacketPtr &pkt, MSHR* mshr)
}
} else {
assert(mshr == &miss);
- cache->clearMasterRequest(Request_MSHR);
+ cache->deassertMemSideBusRequest(Request_MSHR);
if (!pkt->needsResponse()) {
assert(miss.getNumTargets() == 0);
miss.deallocate();
@@ -178,7 +178,7 @@ BlockingBuffer::squash(int threadNum)
if (!miss.inService) {
miss.deallocate();
cache->clearBlocked(Blocked_NoMSHRs);
- cache->clearMasterRequest(Request_MSHR);
+ cache->deassertMemSideBusRequest(Request_MSHR);
}
}
}
@@ -203,7 +203,7 @@ BlockingBuffer::doWriteback(Addr addr,
writebacks[0/*pkt->req->getThreadNum()*/]++;
wb.allocateAsBuffer(pkt);
- cache->setMasterRequest(Request_WB, curTick);
+ cache->requestMemSideBus(Request_WB, curTick);
cache->setBlocked(Blocked_NoWBBuffers);
}
@@ -221,7 +221,7 @@ BlockingBuffer::doWriteback(PacketPtr &pkt)
std::memcpy(wb.pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());
cache->setBlocked(Blocked_NoWBBuffers);
- cache->setMasterRequest(Request_WB, curTick);
+ cache->requestMemSideBus(Request_WB, curTick);
}
diff --git a/src/mem/cache/miss/miss_queue.cc b/src/mem/cache/miss/miss_queue.cc
index 24ca9cfa2..67036ed02 100644
--- a/src/mem/cache/miss/miss_queue.cc
+++ b/src/mem/cache/miss/miss_queue.cc
@@ -348,7 +348,7 @@ MissQueue::allocateMiss(PacketPtr &pkt, int size, Tick time)
}
if (pkt->cmd != MemCmd::HardPFReq) {
//If we need to request the bus (not on HW prefetch), do so
- cache->setMasterRequest(Request_MSHR, time);
+ cache->requestMemSideBus(Request_MSHR, time);
}
return mshr;
}
@@ -376,7 +376,7 @@ MissQueue::allocateWrite(PacketPtr &pkt, int size, Tick time)
cache->setBlocked(Blocked_NoWBBuffers);
}
- cache->setMasterRequest(Request_WB, time);
+ cache->requestMemSideBus(Request_WB, time);
return mshr;
}
@@ -450,7 +450,7 @@ MissQueue::fetchBlock(Addr addr, int blk_size, Tick time,
if (mq.isFull()) {
cache->setBlocked(Blocked_NoMSHRs);
}
- cache->setMasterRequest(Request_MSHR, time);
+ cache->requestMemSideBus(Request_MSHR, time);
return mshr;
}
@@ -534,7 +534,7 @@ MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
unblock = wb.isFull();
wb.markInService(mshr);
if (!wb.havePending()){
- cache->clearMasterRequest(Request_WB);
+ cache->deassertMemSideBusRequest(Request_WB);
}
if (unblock) {
// Do we really unblock?
@@ -545,7 +545,7 @@ MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
unblock = mq.isFull();
mq.markInService(mshr);
if (!mq.havePending()){
- cache->clearMasterRequest(Request_MSHR);
+ cache->deassertMemSideBusRequest(Request_MSHR);
}
if (mshr->originalCmd == MemCmd::HardPFReq) {
DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
@@ -553,7 +553,7 @@ MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
//Also clear pending if need be
if (!prefetcher->havePending())
{
- cache->clearMasterRequest(Request_PF);
+ cache->deassertMemSideBusRequest(Request_PF);
}
}
if (unblock) {
@@ -602,7 +602,7 @@ MissQueue::handleResponse(PacketPtr &pkt, Tick time)
mshr->pkt->req = mshr->getTarget()->req;
mq.markPending(mshr, cmd);
mshr->order = order++;
- cache->setMasterRequest(Request_MSHR, time);
+ cache->requestMemSideBus(Request_MSHR, time);
}
else {
unblock = mq.isFull();
@@ -683,7 +683,7 @@ MissQueue::squash(int threadNum)
}
mq.squash(threadNum);
if (!mq.havePending()) {
- cache->clearMasterRequest(Request_MSHR);
+ cache->deassertMemSideBusRequest(Request_MSHR);
}
if (unblock && !mq.isFull()) {
cache->clearBlocked(cause);
diff --git a/src/mem/cache/miss/mshr_queue.cc b/src/mem/cache/miss/mshr_queue.cc
index add11dfe7..e9aa89bf8 100644
--- a/src/mem/cache/miss/mshr_queue.cc
+++ b/src/mem/cache/miss/mshr_queue.cc
@@ -119,7 +119,6 @@ MSHRQueue::allocate(PacketPtr &pkt, int size)
if (!pkt->needsResponse()) {
mshr->allocateAsBuffer(pkt);
} else {
- assert(size !=0);
mshr->allocate(pkt->cmd, aligned_addr, size, pkt);
allocatedTargets += 1;
}
diff --git a/src/mem/cache/prefetch/base_prefetcher.cc b/src/mem/cache/prefetch/base_prefetcher.cc
index 44daf75e1..966f7d005 100644
--- a/src/mem/cache/prefetch/base_prefetcher.cc
+++ b/src/mem/cache/prefetch/base_prefetcher.cc
@@ -141,7 +141,7 @@ BasePrefetcher::getPacket()
keepTrying = cache->inCache(pkt->getAddr());
}
if (pf.empty()) {
- cache->clearMasterRequest(Request_PF);
+ cache->deassertMemSideBusRequest(Request_PF);
if (keepTrying) return NULL; //None left, all were in cache
}
} while (keepTrying);
@@ -165,7 +165,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
pfRemovedMSHR++;
pf.erase(iter);
if (pf.empty())
- cache->clearMasterRequest(Request_PF);
+ cache->deassertMemSideBusRequest(Request_PF);
}
//Remove anything in queue with delay older than time
@@ -182,7 +182,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
iter--;
}
if (pf.empty())
- cache->clearMasterRequest(Request_PF);
+ cache->deassertMemSideBusRequest(Request_PF);
}
@@ -244,7 +244,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
prefetch->flags |= CACHE_LINE_FILL;
//Make sure to request the bus, with proper delay
- cache->setMasterRequest(Request_PF, prefetch->time);
+ cache->requestMemSideBus(Request_PF, prefetch->time);
//Increment through the list
addr++;
diff --git a/src/mem/packet.cc b/src/mem/packet.cc
index 2463a19ba..8c69def37 100644
--- a/src/mem/packet.cc
+++ b/src/mem/packet.cc
@@ -56,17 +56,18 @@ MemCmd::commandInfo[] =
{ 0, InvalidCmd, "InvalidCmd" },
/* ReadReq */
{ SET3(IsRead, IsRequest, NeedsResponse), ReadResp, "ReadReq" },
+ /* ReadResp */
+ { SET3(IsRead, IsResponse, HasData), InvalidCmd, "ReadResp" },
/* WriteReq */
{ SET4(IsWrite, IsRequest, NeedsResponse, HasData),
WriteResp, "WriteReq" },
- /* WriteReqNoAck */
- { SET3(IsWrite, IsRequest, HasData), InvalidCmd, "WriteReqNoAck" },
- /* ReadResp */
- { SET3(IsRead, IsResponse, HasData), InvalidCmd, "ReadResp" },
/* WriteResp */
{ SET2(IsWrite, IsResponse), InvalidCmd, "WriteResp" },
/* Writeback */
- { SET3(IsWrite, IsRequest, HasData), InvalidCmd, "Writeback" },
+ { SET4(IsWrite, IsRequest, HasData, NeedsResponse),
+ WritebackAck, "Writeback" },
+ /* WritebackAck */
+ { SET2(IsWrite, IsResponse), InvalidCmd, "WritebackAck" },
/* SoftPFReq */
{ SET4(IsRead, IsRequest, IsSWPrefetch, NeedsResponse),
SoftPFResp, "SoftPFReq" },
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index dc23e9f6d..413ffa26b 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -61,8 +61,8 @@ typedef std::list<PacketPtr> PacketList;
#define CACHE_LINE_FILL (1 << 3)
#define COMPRESSED (1 << 4)
#define NO_ALLOCATE (1 << 5)
-#define SNOOP_COMMIT (1 << 6)
+#define EXPRESS_SNOOP (1 << 7)
class MemCmd
{
@@ -73,11 +73,11 @@ class MemCmd
{
InvalidCmd,
ReadReq,
- WriteReq,
- WriteReqNoAck,
ReadResp,
+ WriteReq,
WriteResp,
Writeback,
+ WritebackAck,
SoftPFReq,
HardPFReq,
SoftPFResp,