diff options
author | Andreas Hansson <andreas.hansson@arm.com> | 2015-03-02 04:00:35 -0500 |
---|---|---|
committer | Andreas Hansson <andreas.hansson@arm.com> | 2015-03-02 04:00:35 -0500 |
commit | f26a28929583f2ed7fb55521e49c3f9bef557c05 (patch) | |
tree | e5d71fc69566b02a394015776b0f3f4e3be81427 /src/mem/cache | |
parent | 6ebe8d863ae0c5a7799e9421da32593ac35e1cc7 (diff) | |
download | gem5-f26a28929583f2ed7fb55521e49c3f9bef557c05.tar.xz |
mem: Split port retry for all different packet classes
This patch fixes a long-standing issue with the port flow
control. Before this patch the retry mechanism was shared between all
different packet classes. As a result, a snoop response could get
stuck behind a request waiting for a retry, even if the send/recv
functions were split. This caused message-dependent deadlocks in
stress-test scenarios.
The patch splits the retry into one per packet (message) class. Thus,
sendTimingReq has a corresponding recvReqRetry, sendTimingResp has
recvRespRetry etc. Most of the changes to the code involve simply
clarifying what type of request a specific object was accepting.
The biggest change in functionality is in the cache downstream packet
queue, facing the memory. This queue was shared by requests and snoop
responses, and it is now split into two queues, each with their own
flow control, but the same physical MasterPort. These changes fix
the previously seen deadlocks.
Diffstat (limited to 'src/mem/cache')
-rw-r--r-- | src/mem/cache/base.cc | 2 | ||||
-rw-r--r-- | src/mem/cache/base.hh | 12 | ||||
-rw-r--r-- | src/mem/cache/cache.hh | 15 | ||||
-rw-r--r-- | src/mem/cache/cache_impl.hh | 118 |
4 files changed, 88 insertions, 59 deletions
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc index 78e2ca9ab..cf55b8591 100644 --- a/src/mem/cache/base.cc +++ b/src/mem/cache/base.cc @@ -122,7 +122,7 @@ BaseCache::CacheSlavePort::processSendRetry() // reset the flag and call retry mustSendRetry = false; - sendRetry(); + sendRetryReq(); } void diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh index beb818961..bda3df34a 100644 --- a/src/mem/cache/base.hh +++ b/src/mem/cache/base.hh @@ -125,20 +125,20 @@ class BaseCache : public MemObject /** * Schedule a send of a request packet (from the MSHR). Note - * that we could already have a retry or a transmit list of - * responses outstanding. + * that we could already have a retry outstanding. */ void requestBus(RequestCause cause, Tick time) { DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause); - queue.schedSendEvent(time); + reqQueue.schedSendEvent(time); } protected: CacheMasterPort(const std::string &_name, BaseCache *_cache, - MasterPacketQueue &_queue) : - QueuedMasterPort(_name, _cache, _queue) + ReqPacketQueue &_reqQueue, + SnoopRespPacketQueue &_snoopRespQueue) : + QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue) { } /** @@ -176,7 +176,7 @@ class BaseCache : public MemObject const std::string &_label); /** A normal packet queue used to store responses. */ - SlavePacketQueue queue; + RespPacketQueue queue; bool blocked; diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh index 21a00dbbd..0ee776e92 100644 --- a/src/mem/cache/cache.hh +++ b/src/mem/cache/cache.hh @@ -114,18 +114,21 @@ class Cache : public BaseCache * current MSHR status. This queue has a pointer to our specific * cache implementation and is used by the MemSidePort. 
*/ - class MemSidePacketQueue : public MasterPacketQueue + class CacheReqPacketQueue : public ReqPacketQueue { protected: Cache<TagStore> &cache; + SnoopRespPacketQueue &snoopRespQueue; public: - MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port, - const std::string &label) : - MasterPacketQueue(cache, port, label), cache(cache) { } + CacheReqPacketQueue(Cache<TagStore> &cache, MasterPort &port, + SnoopRespPacketQueue &snoop_resp_queue, + const std::string &label) : + ReqPacketQueue(cache, port, label), cache(cache), + snoopRespQueue(snoop_resp_queue) { } /** * Override the normal sendDeferredPacket and do not only @@ -145,7 +148,9 @@ class Cache : public BaseCache private: /** The cache-specific queue. */ - MemSidePacketQueue _queue; + CacheReqPacketQueue _reqQueue; + + SnoopRespPacketQueue _snoopRespQueue; // a pointer to our specific cache implementation Cache<TagStore> *cache; diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 14e49e1f7..803b3bad8 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -2183,61 +2183,84 @@ Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) template<class TagStore> void -Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket() +Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket() { - // if we have a response packet waiting we have to start with that - if (deferredPacketReady()) { - // use the normal approach from the timing port - trySendTiming(); + // sanity check + assert(!waitingOnRetry); + + // there should never be any deferred request packets in the + // queue, instead we resly on the cache to provide the packets + // from the MSHR queue or write queue + assert(deferredPacketReadyTime() == MaxTick); + + // check for request packets (requests & writebacks) + PacketPtr pkt = cache.getTimingPacket(); + if (pkt == NULL) { + // can happen if e.g. 
we attempt a writeback and fail, but + // before the retry, the writeback is eliminated because + // we snoop another cache's ReadEx. } else { - // check for request packets (requests & writebacks) - PacketPtr pkt = cache.getTimingPacket(); - if (pkt == NULL) { - // can happen if e.g. we attempt a writeback and fail, but - // before the retry, the writeback is eliminated because - // we snoop another cache's ReadEx. - waitingOnRetry = false; - } else { - MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); + MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); + // in most cases getTimingPacket allocates a new packet, and + // we must delete it unless it is successfully sent + bool delete_pkt = !mshr->isForwardNoResponse(); + + // let our snoop responses go first if there are responses to + // the same addresses we are about to writeback, note that + // this creates a dependency between requests and snoop + // responses, but that should not be a problem since there is + // a chain already and the key is that the snoop responses can + // sink unconditionally + if (snoopRespQueue.hasAddr(pkt->getAddr())) { + DPRINTF(CachePort, "Waiting for snoop response to be sent\n"); + Tick when = snoopRespQueue.deferredPacketReadyTime(); + schedSendEvent(when); + + if (delete_pkt) + delete pkt; - waitingOnRetry = !masterPort.sendTimingReq(pkt); + return; + } - if (waitingOnRetry) { - DPRINTF(CachePort, "now waiting on a retry\n"); - if (!mshr->isForwardNoResponse()) { - // we are awaiting a retry, but we - // delete the packet and will be creating a new packet - // when we get the opportunity - delete pkt; - } - // note that we have now masked any requestBus and - // schedSendEvent (we will wait for a retry before - // doing anything), and this is so even if we do not - // care about this packet and might override it before - // it gets retried - } else { - // As part of the call to sendTimingReq the packet is - // forwarded to all neighbouring caches (and any - // caches above 
them) as a snoop. The packet is also - // sent to any potential cache below as the - // interconnect is not allowed to buffer the - // packet. Thus at this point we know if any of the - // neighbouring, or the downstream cache is - // responding, and if so, if it is with a dirty line - // or not. - bool pending_dirty_resp = !pkt->sharedAsserted() && - pkt->memInhibitAsserted(); - - cache.markInService(mshr, pending_dirty_resp); + + waitingOnRetry = !masterPort.sendTimingReq(pkt); + + if (waitingOnRetry) { + DPRINTF(CachePort, "now waiting on a retry\n"); + if (delete_pkt) { + // we are awaiting a retry, but we + // delete the packet and will be creating a new packet + // when we get the opportunity + delete pkt; } + // note that we have now masked any requestBus and + // schedSendEvent (we will wait for a retry before + // doing anything), and this is so even if we do not + // care about this packet and might override it before + // it gets retried + } else { + // As part of the call to sendTimingReq the packet is + // forwarded to all neighbouring caches (and any + // caches above them) as a snoop. The packet is also + // sent to any potential cache below as the + // interconnect is not allowed to buffer the + // packet. Thus at this point we know if any of the + // neighbouring, or the downstream cache is + // responding, and if so, if it is with a dirty line + // or not. 
+ bool pending_dirty_resp = !pkt->sharedAsserted() && + pkt->memInhibitAsserted(); + + cache.markInService(mshr, pending_dirty_resp); } } // if we succeeded and are not waiting for a retry, schedule the - // next send, not only looking at the response transmit list, but - // also considering when the next MSHR is ready + // next send considering when the next MSHR is ready, note that + // snoop responses have their own packet queue and thus schedule + // their own events if (!waitingOnRetry) { - scheduleSend(cache.nextMSHRReadyTime()); + schedSendEvent(cache.nextMSHRReadyTime()); } } @@ -2245,8 +2268,9 @@ template<class TagStore> Cache<TagStore>:: MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache, const std::string &_label) - : BaseCache::CacheMasterPort(_name, _cache, _queue), - _queue(*_cache, *this, _label), cache(_cache) + : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), + _reqQueue(*_cache, *this, _snoopRespQueue, _label), + _snoopRespQueue(*_cache, *this, _label), cache(_cache) { } |