Diffstat (limited to 'src')
67 files changed, 560 insertions, 388 deletions
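The hunks below split the old bidirectional retry hook into direction-specific ones: master ports now implement recvReqRetry (a failed sendTimingReq may be re-issued), slave ports implement recvRespRetry (a failed sendTimingResp may be re-issued), and snoop responses get their own sendRetrySnoopResp/recvRetrySnoopResp pair. As a minimal sketch of the master-port side, assuming gem5's MasterPort interface as modified by the port.hh hunk further down; the class and member names (ExampleMasterPort, trySend, retryPkt) are illustrative only and not part of this patch:

#include <cassert>

#include "mem/port.hh"    // gem5 MasterPort/PacketPtr, post-patch interface

class ExampleMasterPort : public MasterPort
{
  public:
    ExampleMasterPort(const std::string &name, MemObject *owner)
        : MasterPort(name, owner), retryPkt(NULL) { }

    // Issue a request; if the slave refuses it, park it until a retry.
    void trySend(PacketPtr pkt)
    {
        if (!sendTimingReq(pkt))
            retryPkt = pkt;
    }

  protected:
    // Renamed from recvRetry(): now only signals that the failed
    // sendTimingReq() may be re-issued.
    virtual void recvReqRetry()
    {
        assert(retryPkt != NULL);
        PacketPtr pkt = retryPkt;
        retryPkt = NULL;
        if (!sendTimingReq(pkt))
            retryPkt = pkt;     // still blocked, wait for the next retry
    }

    virtual bool recvTimingResp(PacketPtr pkt)
    {
        // sink the response; the owning MemObject would normally consume it
        delete pkt;
        return true;
    }

  private:
    PacketPtr retryPkt;
};

The same retry-packet pattern appears throughout the patch, for example in TrafficGen::recvReqRetry and DefaultFetch<Impl>::recvReqRetry.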
diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc index 3b82c1bd6..f2f11aa45 100644 --- a/src/arch/x86/pagetable_walker.cc +++ b/src/arch/x86/pagetable_walker.cc @@ -133,13 +133,13 @@ Walker::recvTimingResp(PacketPtr pkt) } void -Walker::WalkerPort::recvRetry() +Walker::WalkerPort::recvReqRetry() { - walker->recvRetry(); + walker->recvReqRetry(); } void -Walker::recvRetry() +Walker::recvReqRetry() { std::list<WalkerState *>::iterator iter; for (iter = currStates.begin(); iter != currStates.end(); iter++) { diff --git a/src/arch/x86/pagetable_walker.hh b/src/arch/x86/pagetable_walker.hh index c8ec549fe..181d6fb6c 100644 --- a/src/arch/x86/pagetable_walker.hh +++ b/src/arch/x86/pagetable_walker.hh @@ -77,7 +77,7 @@ namespace X86ISA void recvTimingSnoopReq(PacketPtr pkt) { } Tick recvAtomicSnoop(PacketPtr pkt) { return 0; } void recvFunctionalSnoop(PacketPtr pkt) { } - void recvRetry(); + void recvReqRetry(); bool isSnooping() const { return true; } }; @@ -190,7 +190,7 @@ namespace X86ISA // Functions for dealing with packets. bool recvTimingResp(PacketPtr pkt); - void recvRetry(); + void recvReqRetry(); bool sendTiming(WalkerState * sendingState, PacketPtr pkt); public: diff --git a/src/cpu/kvm/base.hh b/src/cpu/kvm/base.hh index dac4934cb..ce32cdbb1 100644 --- a/src/cpu/kvm/base.hh +++ b/src/cpu/kvm/base.hh @@ -561,9 +561,9 @@ class BaseKvmCPU : public BaseCPU return true; } - void recvRetry() + void recvReqRetry() { - panic("The KVM CPU doesn't expect recvRetry!\n"); + panic("The KVM CPU doesn't expect recvReqRetry!\n"); } }; diff --git a/src/cpu/minor/fetch1.cc b/src/cpu/minor/fetch1.cc index 79a5d0a78..567bd2ecc 100644 --- a/src/cpu/minor/fetch1.cc +++ b/src/cpu/minor/fetch1.cc @@ -417,7 +417,7 @@ Fetch1::recvTimingResp(PacketPtr response) } void -Fetch1::recvRetry() +Fetch1::recvReqRetry() { DPRINTF(Fetch, "recvRetry\n"); assert(icacheState == IcacheNeedsRetry); diff --git a/src/cpu/minor/fetch1.hh b/src/cpu/minor/fetch1.hh index 29e10def5..d4a35c468 100644 --- a/src/cpu/minor/fetch1.hh +++ b/src/cpu/minor/fetch1.hh @@ -77,7 +77,7 @@ class Fetch1 : public Named bool recvTimingResp(PacketPtr pkt) { return fetch.recvTimingResp(pkt); } - void recvRetry() { fetch.recvRetry(); } + void recvReqRetry() { fetch.recvReqRetry(); } }; /** Memory access queuing. 
@@ -345,7 +345,7 @@ class Fetch1 : public Named /** Memory interface */ virtual bool recvTimingResp(PacketPtr pkt); - virtual void recvRetry(); + virtual void recvReqRetry(); public: Fetch1(const std::string &name_, diff --git a/src/cpu/minor/lsq.cc b/src/cpu/minor/lsq.cc index 72873211b..06b6c5165 100644 --- a/src/cpu/minor/lsq.cc +++ b/src/cpu/minor/lsq.cc @@ -1235,7 +1235,7 @@ LSQ::recvTimingResp(PacketPtr response) } void -LSQ::recvRetry() +LSQ::recvReqRetry() { DPRINTF(MinorMem, "Received retry request\n"); diff --git a/src/cpu/minor/lsq.hh b/src/cpu/minor/lsq.hh index 7936ae8f0..8a7d78216 100644 --- a/src/cpu/minor/lsq.hh +++ b/src/cpu/minor/lsq.hh @@ -101,7 +101,7 @@ class LSQ : public Named bool recvTimingResp(PacketPtr pkt) { return lsq.recvTimingResp(pkt); } - void recvRetry() { lsq.recvRetry(); } + void recvReqRetry() { lsq.recvReqRetry(); } void recvTimingSnoopReq(PacketPtr pkt) { return lsq.recvTimingSnoopReq(pkt); } @@ -712,7 +712,7 @@ class LSQ : public Named /** Memory interface */ bool recvTimingResp(PacketPtr pkt); - void recvRetry(); + void recvReqRetry(); void recvTimingSnoopReq(PacketPtr pkt); /** Return the raw-bindable port */ diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc index 434bfd7da..fc7643be2 100644 --- a/src/cpu/o3/cpu.cc +++ b/src/cpu/o3/cpu.cc @@ -101,9 +101,9 @@ FullO3CPU<Impl>::IcachePort::recvTimingResp(PacketPtr pkt) template<class Impl> void -FullO3CPU<Impl>::IcachePort::recvRetry() +FullO3CPU<Impl>::IcachePort::recvReqRetry() { - fetch->recvRetry(); + fetch->recvReqRetry(); } template <class Impl> @@ -126,9 +126,9 @@ FullO3CPU<Impl>::DcachePort::recvTimingSnoopReq(PacketPtr pkt) template <class Impl> void -FullO3CPU<Impl>::DcachePort::recvRetry() +FullO3CPU<Impl>::DcachePort::recvReqRetry() { - lsq->recvRetry(); + lsq->recvReqRetry(); } template <class Impl> diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh index 5b33285c4..c4ccd562b 100644 --- a/src/cpu/o3/cpu.hh +++ b/src/cpu/o3/cpu.hh @@ -150,7 +150,7 @@ class FullO3CPU : public BaseO3CPU virtual void recvTimingSnoopReq(PacketPtr pkt) { } /** Handles doing a retry of a failed fetch. */ - virtual void recvRetry(); + virtual void recvReqRetry(); }; /** @@ -185,7 +185,7 @@ class FullO3CPU : public BaseO3CPU } /** Handles doing a retry of the previous send. */ - virtual void recvRetry(); + virtual void recvReqRetry(); /** * As this CPU requires snooping to maintain the load store queue diff --git a/src/cpu/o3/fetch.hh b/src/cpu/o3/fetch.hh index 968d94029..536568bc2 100644 --- a/src/cpu/o3/fetch.hh +++ b/src/cpu/o3/fetch.hh @@ -224,7 +224,7 @@ class DefaultFetch void startupStage(); /** Handles retrying the fetch access. */ - void recvRetry(); + void recvReqRetry(); /** Processes cache completion event. */ void processCacheCompletion(PacketPtr pkt); diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh index d3b0b3ac5..a462d9251 100644 --- a/src/cpu/o3/fetch_impl.hh +++ b/src/cpu/o3/fetch_impl.hh @@ -1407,7 +1407,7 @@ DefaultFetch<Impl>::fetch(bool &status_change) template<class Impl> void -DefaultFetch<Impl>::recvRetry() +DefaultFetch<Impl>::recvReqRetry() { if (retryPkt != NULL) { assert(cacheBlocked); diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh index 5d57bb52b..d726088ef 100644 --- a/src/cpu/o3/lsq.hh +++ b/src/cpu/o3/lsq.hh @@ -286,7 +286,7 @@ class LSQ { /** * Retry the previous send that failed. 
*/ - void recvRetry(); + void recvReqRetry(); /** * Handles writing back and completing the load or store that has diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh index e0107e36a..06467243d 100644 --- a/src/cpu/o3/lsq_impl.hh +++ b/src/cpu/o3/lsq_impl.hh @@ -330,7 +330,7 @@ LSQ<Impl>::violation() template <class Impl> void -LSQ<Impl>::recvRetry() +LSQ<Impl>::recvReqRetry() { iewStage->cacheUnblocked(); diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh index a2f3927b4..5ad5c4305 100644 --- a/src/cpu/simple/atomic.hh +++ b/src/cpu/simple/atomic.hh @@ -139,7 +139,7 @@ class AtomicSimpleCPU : public BaseSimpleCPU return true; } - void recvRetry() + void recvReqRetry() { panic("Atomic CPU doesn't expect recvRetry!\n"); } diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc index 9171395b0..8b95696a3 100644 --- a/src/cpu/simple/timing.cc +++ b/src/cpu/simple/timing.cc @@ -729,7 +729,7 @@ TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt) } void -TimingSimpleCPU::IcachePort::recvRetry() +TimingSimpleCPU::IcachePort::recvReqRetry() { // we shouldn't get a retry unless we have a packet that we're // waiting to transmit @@ -846,8 +846,8 @@ TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt) // In the case of a split transaction and a cache that is // faster than a CPU we could get two responses in the // same tick, delay the second one - if (!retryEvent.scheduled()) - cpu->schedule(retryEvent, cpu->clockEdge(Cycles(1))); + if (!retryRespEvent.scheduled()) + cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1))); return false; } } @@ -859,7 +859,7 @@ TimingSimpleCPU::DcachePort::DTickEvent::process() } void -TimingSimpleCPU::DcachePort::recvRetry() +TimingSimpleCPU::DcachePort::recvReqRetry() { // we shouldn't get a retry unless we have a packet that we're // waiting to transmit diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh index d8460515b..3ce596fc7 100644 --- a/src/cpu/simple/timing.hh +++ b/src/cpu/simple/timing.hh @@ -157,7 +157,7 @@ class TimingSimpleCPU : public BaseSimpleCPU public: TimingCPUPort(const std::string& _name, TimingSimpleCPU* _cpu) - : MasterPort(_name, _cpu), cpu(_cpu), retryEvent(this) + : MasterPort(_name, _cpu), cpu(_cpu), retryRespEvent(this) { } protected: @@ -179,7 +179,7 @@ class TimingSimpleCPU : public BaseSimpleCPU void schedule(PacketPtr _pkt, Tick t); }; - EventWrapper<MasterPort, &MasterPort::sendRetry> retryEvent; + EventWrapper<MasterPort, &MasterPort::sendRetryResp> retryRespEvent; }; class IcachePort : public TimingCPUPort @@ -195,7 +195,7 @@ class TimingSimpleCPU : public BaseSimpleCPU virtual bool recvTimingResp(PacketPtr pkt); - virtual void recvRetry(); + virtual void recvReqRetry(); struct ITickEvent : public TickEvent { @@ -232,7 +232,7 @@ class TimingSimpleCPU : public BaseSimpleCPU virtual bool recvTimingResp(PacketPtr pkt); - virtual void recvRetry(); + virtual void recvReqRetry(); virtual bool isSnooping() const { return true; diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.hh b/src/cpu/testers/directedtest/RubyDirectedTester.hh index cb58fa63f..0e3b1002a 100644 --- a/src/cpu/testers/directedtest/RubyDirectedTester.hh +++ b/src/cpu/testers/directedtest/RubyDirectedTester.hh @@ -60,7 +60,7 @@ class RubyDirectedTester : public MemObject protected: virtual bool recvTimingResp(PacketPtr pkt); - virtual void recvRetry() + virtual void recvReqRetry() { panic("%s does not expect a retry\n", name()); } }; diff --git a/src/cpu/testers/memtest/memtest.cc 
b/src/cpu/testers/memtest/memtest.cc index 1d2554852..b0dde6d27 100644 --- a/src/cpu/testers/memtest/memtest.cc +++ b/src/cpu/testers/memtest/memtest.cc @@ -63,7 +63,7 @@ MemTest::CpuPort::recvTimingResp(PacketPtr pkt) } void -MemTest::CpuPort::recvRetry() +MemTest::CpuPort::recvReqRetry() { memtest.recvRetry(); } diff --git a/src/cpu/testers/memtest/memtest.hh b/src/cpu/testers/memtest/memtest.hh index 0648f8c11..daed5e5d2 100644 --- a/src/cpu/testers/memtest/memtest.hh +++ b/src/cpu/testers/memtest/memtest.hh @@ -114,7 +114,7 @@ class MemTest : public MemObject Tick recvAtomicSnoop(PacketPtr pkt) { return 0; } - void recvRetry(); + void recvReqRetry(); }; CpuPort port; diff --git a/src/cpu/testers/networktest/networktest.cc b/src/cpu/testers/networktest/networktest.cc index 4a79d5a17..e3103f6b6 100644 --- a/src/cpu/testers/networktest/networktest.cc +++ b/src/cpu/testers/networktest/networktest.cc @@ -59,7 +59,7 @@ NetworkTest::CpuPort::recvTimingResp(PacketPtr pkt) } void -NetworkTest::CpuPort::recvRetry() +NetworkTest::CpuPort::recvReqRetry() { networktest->doRetry(); } diff --git a/src/cpu/testers/networktest/networktest.hh b/src/cpu/testers/networktest/networktest.hh index 253b48233..572097a09 100644 --- a/src/cpu/testers/networktest/networktest.hh +++ b/src/cpu/testers/networktest/networktest.hh @@ -91,7 +91,7 @@ class NetworkTest : public MemObject virtual bool recvTimingResp(PacketPtr pkt); - virtual void recvRetry(); + virtual void recvReqRetry(); }; CpuPort cachePort; diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh index e1b829a7d..c0ad554c6 100644 --- a/src/cpu/testers/rubytest/RubyTester.hh +++ b/src/cpu/testers/rubytest/RubyTester.hh @@ -75,7 +75,7 @@ class RubyTester : public MemObject protected: virtual bool recvTimingResp(PacketPtr pkt); - virtual void recvRetry() + virtual void recvReqRetry() { panic("%s does not expect a retry\n", name()); } }; diff --git a/src/cpu/testers/traffic_gen/traffic_gen.cc b/src/cpu/testers/traffic_gen/traffic_gen.cc index 9eee7a119..e4759512d 100644 --- a/src/cpu/testers/traffic_gen/traffic_gen.cc +++ b/src/cpu/testers/traffic_gen/traffic_gen.cc @@ -476,7 +476,7 @@ TrafficGen::enterState(uint32_t newState) } void -TrafficGen::recvRetry() +TrafficGen::recvReqRetry() { assert(retryPkt != NULL); diff --git a/src/cpu/testers/traffic_gen/traffic_gen.hh b/src/cpu/testers/traffic_gen/traffic_gen.hh index b9081b1d4..eb9f6541d 100644 --- a/src/cpu/testers/traffic_gen/traffic_gen.hh +++ b/src/cpu/testers/traffic_gen/traffic_gen.hh @@ -91,7 +91,7 @@ class TrafficGen : public MemObject * Receive a retry from the neighbouring port and attempt to * resend the waiting packet. */ - void recvRetry(); + void recvReqRetry(); /** Struct to represent a probabilistic transition during parsing. 
*/ struct Transition { @@ -148,7 +148,7 @@ class TrafficGen : public MemObject protected: - void recvRetry() { trafficGen.recvRetry(); } + void recvReqRetry() { trafficGen.recvReqRetry(); } bool recvTimingResp(PacketPtr pkt); diff --git a/src/dev/dma_device.cc b/src/dev/dma_device.cc index d53ea2546..f6e476a8e 100644 --- a/src/dev/dma_device.cc +++ b/src/dev/dma_device.cc @@ -146,7 +146,7 @@ DmaPort::drain(DrainManager *dm) } void -DmaPort::recvRetry() +DmaPort::recvReqRetry() { assert(transmitList.size()); trySendTimingReq(); diff --git a/src/dev/dma_device.hh b/src/dev/dma_device.hh index 6df4a287d..933cbeb00 100644 --- a/src/dev/dma_device.hh +++ b/src/dev/dma_device.hh @@ -134,7 +134,7 @@ class DmaPort : public MasterPort protected: bool recvTimingResp(PacketPtr pkt); - void recvRetry() ; + void recvReqRetry() ; void queueDma(PacketPtr pkt); diff --git a/src/mem/addr_mapper.cc b/src/mem/addr_mapper.cc index 0cc2e9c2f..06237745b 100644 --- a/src/mem/addr_mapper.cc +++ b/src/mem/addr_mapper.cc @@ -190,15 +190,15 @@ AddrMapper::isSnooping() const } void -AddrMapper::recvRetryMaster() +AddrMapper::recvReqRetry() { - slavePort.sendRetry(); + slavePort.sendRetryReq(); } void -AddrMapper::recvRetrySlave() +AddrMapper::recvRespRetry() { - masterPort.sendRetry(); + masterPort.sendRetryResp(); } void diff --git a/src/mem/addr_mapper.hh b/src/mem/addr_mapper.hh index 6564a7490..6765638e9 100644 --- a/src/mem/addr_mapper.hh +++ b/src/mem/addr_mapper.hh @@ -143,9 +143,9 @@ class AddrMapper : public MemObject return mapper.isSnooping(); } - void recvRetry() + void recvReqRetry() { - mapper.recvRetryMaster(); + mapper.recvReqRetry(); } private: @@ -193,9 +193,9 @@ class AddrMapper : public MemObject return mapper.getAddrRanges(); } - void recvRetry() + void recvRespRetry() { - mapper.recvRetrySlave(); + mapper.recvRespRetry(); } private: @@ -227,9 +227,9 @@ class AddrMapper : public MemObject bool isSnooping() const; - void recvRetryMaster(); + void recvReqRetry(); - void recvRetrySlave(); + void recvRespRetry(); void recvRangeChange(); }; diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc index c003677ba..5b925d1cb 100644 --- a/src/mem/bridge.cc +++ b/src/mem/bridge.cc @@ -200,7 +200,7 @@ Bridge::BridgeSlavePort::retryStalledReq() if (retryReq) { DPRINTF(Bridge, "Request waiting for retry, now retrying\n"); retryReq = false; - sendRetry(); + sendRetryReq(); } } @@ -309,7 +309,7 @@ Bridge::BridgeSlavePort::trySendTiming() if (!masterPort.reqQueueFull() && retryReq) { DPRINTF(Bridge, "Request waiting for retry, now retrying\n"); retryReq = false; - sendRetry(); + sendRetryReq(); } } @@ -318,13 +318,13 @@ Bridge::BridgeSlavePort::trySendTiming() } void -Bridge::BridgeMasterPort::recvRetry() +Bridge::BridgeMasterPort::recvReqRetry() { trySendTiming(); } void -Bridge::BridgeSlavePort::recvRetry() +Bridge::BridgeSlavePort::recvRespRetry() { trySendTiming(); } diff --git a/src/mem/bridge.hh b/src/mem/bridge.hh index e6fa498cb..6aebe5204 100644 --- a/src/mem/bridge.hh +++ b/src/mem/bridge.hh @@ -193,7 +193,7 @@ class Bridge : public MemObject /** When receiving a retry request from the peer port, pass it to the bridge. */ - void recvRetry(); + void recvRespRetry(); /** When receiving a Atomic requestfrom the peer port, pass it to the bridge. */ @@ -301,7 +301,7 @@ class Bridge : public MemObject /** When receiving a retry request from the peer port, pass it to the bridge. */ - void recvRetry(); + void recvReqRetry(); }; /** Slave port of the bridge. 
*/ diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc index 78e2ca9ab..cf55b8591 100644 --- a/src/mem/cache/base.cc +++ b/src/mem/cache/base.cc @@ -122,7 +122,7 @@ BaseCache::CacheSlavePort::processSendRetry() // reset the flag and call retry mustSendRetry = false; - sendRetry(); + sendRetryReq(); } void diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh index beb818961..bda3df34a 100644 --- a/src/mem/cache/base.hh +++ b/src/mem/cache/base.hh @@ -125,20 +125,20 @@ class BaseCache : public MemObject /** * Schedule a send of a request packet (from the MSHR). Note - * that we could already have a retry or a transmit list of - * responses outstanding. + * that we could already have a retry outstanding. */ void requestBus(RequestCause cause, Tick time) { DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause); - queue.schedSendEvent(time); + reqQueue.schedSendEvent(time); } protected: CacheMasterPort(const std::string &_name, BaseCache *_cache, - MasterPacketQueue &_queue) : - QueuedMasterPort(_name, _cache, _queue) + ReqPacketQueue &_reqQueue, + SnoopRespPacketQueue &_snoopRespQueue) : + QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue) { } /** @@ -176,7 +176,7 @@ class BaseCache : public MemObject const std::string &_label); /** A normal packet queue used to store responses. */ - SlavePacketQueue queue; + RespPacketQueue queue; bool blocked; diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh index 21a00dbbd..0ee776e92 100644 --- a/src/mem/cache/cache.hh +++ b/src/mem/cache/cache.hh @@ -114,18 +114,21 @@ class Cache : public BaseCache * current MSHR status. This queue has a pointer to our specific * cache implementation and is used by the MemSidePort. */ - class MemSidePacketQueue : public MasterPacketQueue + class CacheReqPacketQueue : public ReqPacketQueue { protected: Cache<TagStore> &cache; + SnoopRespPacketQueue &snoopRespQueue; public: - MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port, - const std::string &label) : - MasterPacketQueue(cache, port, label), cache(cache) { } + CacheReqPacketQueue(Cache<TagStore> &cache, MasterPort &port, + SnoopRespPacketQueue &snoop_resp_queue, + const std::string &label) : + ReqPacketQueue(cache, port, label), cache(cache), + snoopRespQueue(snoop_resp_queue) { } /** * Override the normal sendDeferredPacket and do not only @@ -145,7 +148,9 @@ class Cache : public BaseCache private: /** The cache-specific queue. 
*/ - MemSidePacketQueue _queue; + CacheReqPacketQueue _reqQueue; + + SnoopRespPacketQueue _snoopRespQueue; // a pointer to our specific cache implementation Cache<TagStore> *cache; diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 14e49e1f7..803b3bad8 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -2183,61 +2183,84 @@ Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt) template<class TagStore> void -Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket() +Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket() { - // if we have a response packet waiting we have to start with that - if (deferredPacketReady()) { - // use the normal approach from the timing port - trySendTiming(); + // sanity check + assert(!waitingOnRetry); + + // there should never be any deferred request packets in the + // queue, instead we resly on the cache to provide the packets + // from the MSHR queue or write queue + assert(deferredPacketReadyTime() == MaxTick); + + // check for request packets (requests & writebacks) + PacketPtr pkt = cache.getTimingPacket(); + if (pkt == NULL) { + // can happen if e.g. we attempt a writeback and fail, but + // before the retry, the writeback is eliminated because + // we snoop another cache's ReadEx. } else { - // check for request packets (requests & writebacks) - PacketPtr pkt = cache.getTimingPacket(); - if (pkt == NULL) { - // can happen if e.g. we attempt a writeback and fail, but - // before the retry, the writeback is eliminated because - // we snoop another cache's ReadEx. - waitingOnRetry = false; - } else { - MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); + MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState); + // in most cases getTimingPacket allocates a new packet, and + // we must delete it unless it is successfully sent + bool delete_pkt = !mshr->isForwardNoResponse(); + + // let our snoop responses go first if there are responses to + // the same addresses we are about to writeback, note that + // this creates a dependency between requests and snoop + // responses, but that should not be a problem since there is + // a chain already and the key is that the snoop responses can + // sink unconditionally + if (snoopRespQueue.hasAddr(pkt->getAddr())) { + DPRINTF(CachePort, "Waiting for snoop response to be sent\n"); + Tick when = snoopRespQueue.deferredPacketReadyTime(); + schedSendEvent(when); + + if (delete_pkt) + delete pkt; - waitingOnRetry = !masterPort.sendTimingReq(pkt); + return; + } - if (waitingOnRetry) { - DPRINTF(CachePort, "now waiting on a retry\n"); - if (!mshr->isForwardNoResponse()) { - // we are awaiting a retry, but we - // delete the packet and will be creating a new packet - // when we get the opportunity - delete pkt; - } - // note that we have now masked any requestBus and - // schedSendEvent (we will wait for a retry before - // doing anything), and this is so even if we do not - // care about this packet and might override it before - // it gets retried - } else { - // As part of the call to sendTimingReq the packet is - // forwarded to all neighbouring caches (and any - // caches above them) as a snoop. The packet is also - // sent to any potential cache below as the - // interconnect is not allowed to buffer the - // packet. Thus at this point we know if any of the - // neighbouring, or the downstream cache is - // responding, and if so, if it is with a dirty line - // or not. 
- bool pending_dirty_resp = !pkt->sharedAsserted() && - pkt->memInhibitAsserted(); - - cache.markInService(mshr, pending_dirty_resp); + + waitingOnRetry = !masterPort.sendTimingReq(pkt); + + if (waitingOnRetry) { + DPRINTF(CachePort, "now waiting on a retry\n"); + if (delete_pkt) { + // we are awaiting a retry, but we + // delete the packet and will be creating a new packet + // when we get the opportunity + delete pkt; } + // note that we have now masked any requestBus and + // schedSendEvent (we will wait for a retry before + // doing anything), and this is so even if we do not + // care about this packet and might override it before + // it gets retried + } else { + // As part of the call to sendTimingReq the packet is + // forwarded to all neighbouring caches (and any + // caches above them) as a snoop. The packet is also + // sent to any potential cache below as the + // interconnect is not allowed to buffer the + // packet. Thus at this point we know if any of the + // neighbouring, or the downstream cache is + // responding, and if so, if it is with a dirty line + // or not. + bool pending_dirty_resp = !pkt->sharedAsserted() && + pkt->memInhibitAsserted(); + + cache.markInService(mshr, pending_dirty_resp); } } // if we succeeded and are not waiting for a retry, schedule the - // next send, not only looking at the response transmit list, but - // also considering when the next MSHR is ready + // next send considering when the next MSHR is ready, note that + // snoop responses have their own packet queue and thus schedule + // their own events if (!waitingOnRetry) { - scheduleSend(cache.nextMSHRReadyTime()); + schedSendEvent(cache.nextMSHRReadyTime()); } } @@ -2245,8 +2268,9 @@ template<class TagStore> Cache<TagStore>:: MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache, const std::string &_label) - : BaseCache::CacheMasterPort(_name, _cache, _queue), - _queue(*_cache, *this, _label), cache(_cache) + : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), + _reqQueue(*_cache, *this, _snoopRespQueue, _label), + _snoopRespQueue(*_cache, *this, _label), cache(_cache) { } diff --git a/src/mem/coherent_xbar.cc b/src/mem/coherent_xbar.cc index 454de69ce..667ff96f9 100644 --- a/src/mem/coherent_xbar.cc +++ b/src/mem/coherent_xbar.cc @@ -66,8 +66,8 @@ CoherentXBar::CoherentXBar(const CoherentXBarParams *p) masterPorts.push_back(bp); reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d", i))); - snoopLayers.push_back(new SnoopLayer(*bp, *this, - csprintf(".snoopLayer%d", i))); + snoopLayers.push_back(new SnoopRespLayer(*bp, *this, + csprintf(".snoopLayer%d", i))); } // see if we have a default slave device connected and if so add @@ -80,9 +80,9 @@ CoherentXBar::CoherentXBar(const CoherentXBarParams *p) masterPorts.push_back(bp); reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d", defaultPortID))); - snoopLayers.push_back(new SnoopLayer(*bp, *this, - csprintf(".snoopLayer%d", - defaultPortID))); + snoopLayers.push_back(new SnoopRespLayer(*bp, *this, + csprintf(".snoopLayer%d", + defaultPortID))); } // create the slave ports, once again starting at zero @@ -528,7 +528,7 @@ CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id, } void -CoherentXBar::recvRetry(PortID master_port_id) +CoherentXBar::recvReqRetry(PortID master_port_id) { // responses and snoop responses never block on forwarding them, // so the retry will always be coming from a port to which we diff --git a/src/mem/coherent_xbar.hh 
b/src/mem/coherent_xbar.hh index 446c0b577..ffe4a066b 100644 --- a/src/mem/coherent_xbar.hh +++ b/src/mem/coherent_xbar.hh @@ -75,12 +75,9 @@ class CoherentXBar : public BaseXBar * Declare the layers of this crossbar, one vector for requests, * one for responses, and one for snoop responses */ - typedef Layer<SlavePort,MasterPort> ReqLayer; - typedef Layer<MasterPort,SlavePort> RespLayer; - typedef Layer<SlavePort,MasterPort> SnoopLayer; std::vector<ReqLayer*> reqLayers; std::vector<RespLayer*> respLayers; - std::vector<SnoopLayer*> snoopLayers; + std::vector<SnoopRespLayer*> snoopLayers; /** * Declaration of the coherent crossbar slave port type, one will @@ -131,7 +128,7 @@ class CoherentXBar : public BaseXBar /** * When receiving a retry, pass it to the crossbar. */ - virtual void recvRetry() + virtual void recvRespRetry() { panic("Crossbar slave ports should never retry.\n"); } /** @@ -202,8 +199,8 @@ class CoherentXBar : public BaseXBar /** When reciving a retry from the peer port (at id), pass it to the crossbar. */ - virtual void recvRetry() - { xbar.recvRetry(id); } + virtual void recvReqRetry() + { xbar.recvReqRetry(id); } }; @@ -233,14 +230,15 @@ class CoherentXBar : public BaseXBar * Override the sending of retries and pass them on through * the mirrored slave port. */ - void sendRetry() { - slavePort.sendRetry(); + void sendRetryResp() { + // forward it as a snoop response retry + slavePort.sendRetrySnoopResp(); } /** * Provided as necessary. */ - void recvRetry() { panic("SnoopRespPort should never see retry\n"); } + void recvReqRetry() { panic("SnoopRespPort should never see retry\n"); } /** * Provided as necessary. @@ -292,7 +290,7 @@ class CoherentXBar : public BaseXBar /** Timing function called by port when it is once again able to process * requests. 
*/ - void recvRetry(PortID master_port_id); + void recvReqRetry(PortID master_port_id); /** * Forward a timing packet to our snoopers, potentially excluding diff --git a/src/mem/comm_monitor.cc b/src/mem/comm_monitor.cc index 7539672cc..dc4fa4bd4 100644 --- a/src/mem/comm_monitor.cc +++ b/src/mem/comm_monitor.cc @@ -429,15 +429,15 @@ CommMonitor::getAddrRanges() const } void -CommMonitor::recvRetryMaster() +CommMonitor::recvReqRetry() { - slavePort.sendRetry(); + slavePort.sendRetryReq(); } void -CommMonitor::recvRetrySlave() +CommMonitor::recvRespRetry() { - masterPort.sendRetry(); + masterPort.sendRetryResp(); } void diff --git a/src/mem/comm_monitor.hh b/src/mem/comm_monitor.hh index f1c6b496d..f4aa9a20e 100644 --- a/src/mem/comm_monitor.hh +++ b/src/mem/comm_monitor.hh @@ -173,9 +173,9 @@ class CommMonitor : public MemObject return mon.isSnooping(); } - void recvRetry() + void recvReqRetry() { - mon.recvRetryMaster(); + mon.recvReqRetry(); } private: @@ -229,9 +229,9 @@ class CommMonitor : public MemObject return mon.getAddrRanges(); } - void recvRetry() + void recvRespRetry() { - mon.recvRetrySlave(); + mon.recvRespRetry(); } private: @@ -263,9 +263,9 @@ class CommMonitor : public MemObject bool isSnooping() const; - void recvRetryMaster(); + void recvReqRetry(); - void recvRetrySlave(); + void recvRespRetry(); void recvRangeChange(); diff --git a/src/mem/dram_ctrl.cc b/src/mem/dram_ctrl.cc index f4bea04b0..8682cbbaf 100644 --- a/src/mem/dram_ctrl.cc +++ b/src/mem/dram_ctrl.cc @@ -748,7 +748,7 @@ DRAMCtrl::processRespondEvent() // so if there is a read that was forced to wait, retry now if (retryRdReq) { retryRdReq = false; - port.sendRetry(); + port.sendRetryReq(); } } @@ -1441,7 +1441,7 @@ DRAMCtrl::processNextReqEvent() // the next request processing if (retryWrReq && writeQueue.size() < writeBufferSize) { retryWrReq = false; - port.sendRetry(); + port.sendRetryReq(); } } diff --git a/src/mem/dram_ctrl.hh b/src/mem/dram_ctrl.hh index 3aa06feac..3caaff959 100644 --- a/src/mem/dram_ctrl.hh +++ b/src/mem/dram_ctrl.hh @@ -95,7 +95,7 @@ class DRAMCtrl : public AbstractMemory class MemoryPort : public QueuedSlavePort { - SlavePacketQueue queue; + RespPacketQueue queue; DRAMCtrl& memory; public: diff --git a/src/mem/dramsim2.cc b/src/mem/dramsim2.cc index eb20b9486..7346a4515 100644 --- a/src/mem/dramsim2.cc +++ b/src/mem/dramsim2.cc @@ -147,7 +147,7 @@ DRAMSim2::tick() // state and send a retry if conditions have changed if (retryReq && nbrOutstanding() < wrapper.queueSize()) { retryReq = false; - port.sendRetry(); + port.sendRetryReq(); } schedule(tickEvent, curTick() + wrapper.clockPeriod() * SimClock::Int::ns); @@ -244,7 +244,7 @@ DRAMSim2::recvTimingReq(PacketPtr pkt) } void -DRAMSim2::recvRetry() +DRAMSim2::recvRespRetry() { DPRINTF(DRAMSim2, "Retrying\n"); @@ -402,9 +402,9 @@ DRAMSim2::MemoryPort::recvTimingReq(PacketPtr pkt) } void -DRAMSim2::MemoryPort::recvRetry() +DRAMSim2::MemoryPort::recvRespRetry() { - memory.recvRetry(); + memory.recvRespRetry(); } DRAMSim2* diff --git a/src/mem/dramsim2.hh b/src/mem/dramsim2.hh index 7153f3f84..cd87fe73d 100644 --- a/src/mem/dramsim2.hh +++ b/src/mem/dramsim2.hh @@ -80,7 +80,7 @@ class DRAMSim2 : public AbstractMemory bool recvTimingReq(PacketPtr pkt); - void recvRetry(); + void recvRespRetry(); AddrRangeList getAddrRanges() const; @@ -208,7 +208,7 @@ class DRAMSim2 : public AbstractMemory Tick recvAtomic(PacketPtr pkt); void recvFunctional(PacketPtr pkt); bool recvTimingReq(PacketPtr pkt); - void recvRetry(); + void recvRespRetry(); }; 
diff --git a/src/mem/external_slave.cc b/src/mem/external_slave.cc index a6f72fd71..fadeff833 100644 --- a/src/mem/external_slave.cc +++ b/src/mem/external_slave.cc @@ -80,7 +80,7 @@ class StubSlavePort : public ExternalSlave::Port void recvFunctional(PacketPtr packet); bool recvTimingReq(PacketPtr packet); bool recvTimingSnoopResp(PacketPtr packet); - void recvRetry(); + void recvRespRetry(); void recvFunctionalSnoop(PacketPtr packet); }; @@ -131,7 +131,7 @@ StubSlavePort::ResponseEvent::process() owner.responsePacket = NULL; if (owner.mustRetry) - owner.sendRetry(); + owner.sendRetryReq(); owner.mustRetry = false; } } @@ -161,7 +161,7 @@ StubSlavePort::recvTimingSnoopResp(PacketPtr packet) } void -StubSlavePort::recvRetry() +StubSlavePort::recvRespRetry() { assert(responsePacket); /* Stub handles only one response at a time so responseEvent should never diff --git a/src/mem/mem_checker_monitor.cc b/src/mem/mem_checker_monitor.cc index e0f863d99..e70e4f856 100644 --- a/src/mem/mem_checker_monitor.cc +++ b/src/mem/mem_checker_monitor.cc @@ -356,15 +356,15 @@ MemCheckerMonitor::getAddrRanges() const } void -MemCheckerMonitor::recvRetryMaster() +MemCheckerMonitor::recvReqRetry() { - slavePort.sendRetry(); + slavePort.sendRetryReq(); } void -MemCheckerMonitor::recvRetrySlave() +MemCheckerMonitor::recvRespRetry() { - masterPort.sendRetry(); + masterPort.sendRetryResp(); } void diff --git a/src/mem/mem_checker_monitor.hh b/src/mem/mem_checker_monitor.hh index 15e5b9665..e3a8832b5 100644 --- a/src/mem/mem_checker_monitor.hh +++ b/src/mem/mem_checker_monitor.hh @@ -136,9 +136,9 @@ class MemCheckerMonitor : public MemObject return mon.isSnooping(); } - void recvRetry() + void recvReqRetry() { - mon.recvRetryMaster(); + mon.recvReqRetry(); } private: @@ -192,9 +192,9 @@ class MemCheckerMonitor : public MemObject return mon.getAddrRanges(); } - void recvRetry() + void recvRespRetry() { - mon.recvRetrySlave(); + mon.recvRespRetry(); } private: @@ -226,9 +226,9 @@ class MemCheckerMonitor : public MemObject bool isSnooping() const; - void recvRetryMaster(); + void recvReqRetry(); - void recvRetrySlave(); + void recvRespRetry(); void recvRangeChange(); diff --git a/src/mem/mport.hh b/src/mem/mport.hh index b74761256..9bc437c52 100644 --- a/src/mem/mport.hh +++ b/src/mem/mport.hh @@ -76,7 +76,8 @@ class MessageMasterPort : public QueuedMasterPort public: MessageMasterPort(const std::string &name, MemObject *owner) : - QueuedMasterPort(name, owner, queue), queue(*owner, *this) + QueuedMasterPort(name, owner, reqQueue, snoopRespQueue), + reqQueue(*owner, *this), snoopRespQueue(*owner, *this) {} virtual ~MessageMasterPort() @@ -87,7 +88,8 @@ class MessageMasterPort : public QueuedMasterPort protected: /** A packet queue for outgoing packets. */ - MasterPacketQueue queue; + ReqPacketQueue reqQueue; + SnoopRespPacketQueue snoopRespQueue; // Accept and ignore responses. 
virtual Tick recvResponse(PacketPtr pkt) diff --git a/src/mem/noncoherent_xbar.cc b/src/mem/noncoherent_xbar.cc index 0cf656f80..db33f0f70 100644 --- a/src/mem/noncoherent_xbar.cc +++ b/src/mem/noncoherent_xbar.cc @@ -224,7 +224,7 @@ NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id) } void -NoncoherentXBar::recvRetry(PortID master_port_id) +NoncoherentXBar::recvReqRetry(PortID master_port_id) { // responses never block on forwarding them, so the retry will // always be coming from a port to which we tried to forward a diff --git a/src/mem/noncoherent_xbar.hh b/src/mem/noncoherent_xbar.hh index 122fc6b27..ba99d9be8 100644 --- a/src/mem/noncoherent_xbar.hh +++ b/src/mem/noncoherent_xbar.hh @@ -76,8 +76,6 @@ class NoncoherentXBar : public BaseXBar * Declare the layers of this crossbar, one vector for requests * and one for responses. */ - typedef Layer<SlavePort,MasterPort> ReqLayer; - typedef Layer<MasterPort,SlavePort> RespLayer; std::vector<ReqLayer*> reqLayers; std::vector<RespLayer*> respLayers; @@ -123,7 +121,7 @@ class NoncoherentXBar : public BaseXBar /** * When receiving a retry, pass it to the crossbar. */ - virtual void recvRetry() + virtual void recvRespRetry() { panic("Crossbar slave ports should never retry.\n"); } /** @@ -168,8 +166,8 @@ class NoncoherentXBar : public BaseXBar /** When reciving a retry from the peer port (at id), pass it to the crossbar. */ - virtual void recvRetry() - { xbar.recvRetry(id); } + virtual void recvReqRetry() + { xbar.recvReqRetry(id); } }; @@ -183,7 +181,7 @@ class NoncoherentXBar : public BaseXBar /** Timing function called by port when it is once again able to process * requests. */ - void recvRetry(PortID master_port_id); + void recvReqRetry(PortID master_port_id); /** Function called by the port when the crossbar is recieving a Atomic transaction.*/ diff --git a/src/mem/packet_queue.cc b/src/mem/packet_queue.cc index e9fe72ead..29f6d2903 100644 --- a/src/mem/packet_queue.cc +++ b/src/mem/packet_queue.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 ARM Limited + * Copyright (c) 2012,2015 ARM Limited * All rights reserved. 
* * The license below extends only to copyright in the software and shall @@ -63,10 +63,23 @@ PacketQueue::retry() { DPRINTF(PacketQueue, "Queue %s received retry\n", name()); assert(waitingOnRetry); + waitingOnRetry = false; sendDeferredPacket(); } bool +PacketQueue::hasAddr(Addr addr) const +{ + // caller is responsible for ensuring that all packets have the + // same alignment + for (const auto& p : transmitList) { + if (p.pkt->getAddr() == addr) + return true; + } + return false; +} + +bool PacketQueue::checkFunctional(PacketPtr pkt) { pkt->pushLabel(label); @@ -87,27 +100,11 @@ PacketQueue::checkFunctional(PacketPtr pkt) } void -PacketQueue::schedSendEvent(Tick when) -{ - // if we are waiting on a retry, do not schedule a send event, and - // instead rely on retry being called - if (waitingOnRetry) { - assert(!sendEvent.scheduled()); - return; - } - - if (!sendEvent.scheduled()) { - em.schedule(&sendEvent, when); - } else if (sendEvent.when() > when) { - em.reschedule(&sendEvent, when); - } -} - -void -PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop) +PacketQueue::schedSendTiming(PacketPtr pkt, Tick when) { DPRINTF(PacketQueue, "%s for %s address %x size %d\n", __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize()); + // we can still send a packet before the end of this tick assert(when >= curTick()); @@ -127,14 +124,22 @@ PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop) // note that currently we ignore a potentially outstanding retry // and could in theory put a new packet at the head of the // transmit list before retrying the existing packet - transmitList.push_front(DeferredPacket(when, pkt, send_as_snoop)); + transmitList.push_front(DeferredPacket(when, pkt)); schedSendEvent(when); return; } + // we should either have an outstanding retry, or a send event + // scheduled, but there is an unfortunate corner case where the + // x86 page-table walker and timing CPU send out a new request as + // part of the receiving of a response (called by + // PacketQueue::sendDeferredPacket), in which we end up calling + // ourselves again before we had a chance to update waitingOnRetry + // assert(waitingOnRetry || sendEvent.scheduled()); + // list is non-empty and this belongs at the end if (when >= transmitList.back().tick) { - transmitList.push_back(DeferredPacket(when, pkt, send_as_snoop)); + transmitList.push_back(DeferredPacket(when, pkt)); return; } @@ -143,46 +148,35 @@ PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop) ++i; // already checked for insertion at front while (i != transmitList.end() && when >= i->tick) ++i; - transmitList.insert(i, DeferredPacket(when, pkt, send_as_snoop)); + transmitList.insert(i, DeferredPacket(when, pkt)); } -void PacketQueue::trySendTiming() +void +PacketQueue::schedSendEvent(Tick when) { - assert(deferredPacketReady()); - - DeferredPacket dp = transmitList.front(); - - // use the appropriate implementation of sendTiming based on the - // type of port associated with the queue, and whether the packet - // is to be sent as a snoop or not - waitingOnRetry = !sendTiming(dp.pkt, dp.sendAsSnoop); - - if (!waitingOnRetry) { - // take the packet off the list - transmitList.pop_front(); + // if we are waiting on a retry just hold off + if (waitingOnRetry) { + DPRINTF(PacketQueue, "Not scheduling send as waiting for retry\n"); + assert(!sendEvent.scheduled()); + return; } -} -void -PacketQueue::scheduleSend(Tick time) -{ - // the next ready time is either determined by the next 
deferred packet, - // or in the cache through the MSHR ready time - Tick nextReady = std::max(std::min(deferredPacketReadyTime(), time), - curTick() + 1); + if (when != MaxTick) { + // we cannot go back in time, and to be consistent we stick to + // one tick in the future + when = std::max(when, curTick() + 1); + // @todo Revisit the +1 - if (nextReady != MaxTick) { - // if the sendTiming caused someone else to call our - // recvTiming we could already have an event scheduled, check if (!sendEvent.scheduled()) { - em.schedule(&sendEvent, nextReady); - } else if (nextReady < sendEvent.when()) { + em.schedule(&sendEvent, when); + } else if (when < sendEvent.when()) { // if the new time is earlier than when the event // currently is scheduled, move it forward - em.reschedule(&sendEvent, nextReady); + em.reschedule(&sendEvent, when); } } else { - // no more to send, so if we're draining, we may be done + // we get a MaxTick when there is no more to send, so if we're + // draining, we may be done at this point if (drainManager && transmitList.empty() && !sendEvent.scheduled()) { DPRINTF(Drain, "PacketQueue done draining," "processing drain event\n"); @@ -195,14 +189,30 @@ PacketQueue::scheduleSend(Tick time) void PacketQueue::sendDeferredPacket() { - // try to send what is on the list, this will set waitingOnRetry - // accordingly - trySendTiming(); + // sanity checks + assert(!waitingOnRetry); + assert(deferredPacketReady()); + + DeferredPacket dp = transmitList.front(); + + // take the packet of the list before sending it, as sending of + // the packet in some cases causes a new packet to be enqueued + // (most notaly when responding to the timing CPU, leading to a + // new request hitting in the L1 icache, leading to a new + // response) + transmitList.pop_front(); + + // use the appropriate implementation of sendTiming based on the + // type of queue + waitingOnRetry = !sendTiming(dp.pkt); // if we succeeded and are not waiting for a retry, schedule the // next send if (!waitingOnRetry) { - scheduleSend(); + schedSendEvent(deferredPacketReadyTime()); + } else { + // put the packet back at the front of the list + transmitList.push_front(dp); } } @@ -223,32 +233,39 @@ PacketQueue::drain(DrainManager *dm) return 1; } -MasterPacketQueue::MasterPacketQueue(EventManager& _em, MasterPort& _masterPort, - const std::string _label) +ReqPacketQueue::ReqPacketQueue(EventManager& _em, MasterPort& _masterPort, + const std::string _label) + : PacketQueue(_em, _label), masterPort(_masterPort) +{ +} + +bool +ReqPacketQueue::sendTiming(PacketPtr pkt) +{ + return masterPort.sendTimingReq(pkt); +} + +SnoopRespPacketQueue::SnoopRespPacketQueue(EventManager& _em, + MasterPort& _masterPort, + const std::string _label) : PacketQueue(_em, _label), masterPort(_masterPort) { } bool -MasterPacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop) +SnoopRespPacketQueue::sendTiming(PacketPtr pkt) { - // attempt to send the packet and return according to the outcome - if (!send_as_snoop) - return masterPort.sendTimingReq(pkt); - else - return masterPort.sendTimingSnoopResp(pkt); + return masterPort.sendTimingSnoopResp(pkt); } -SlavePacketQueue::SlavePacketQueue(EventManager& _em, SlavePort& _slavePort, - const std::string _label) +RespPacketQueue::RespPacketQueue(EventManager& _em, SlavePort& _slavePort, + const std::string _label) : PacketQueue(_em, _label), slavePort(_slavePort) { } bool -SlavePacketQueue::sendTiming(PacketPtr pkt, bool send_as_snoop) +RespPacketQueue::sendTiming(PacketPtr pkt) { - // we should never 
have queued snoop requests - assert(!send_as_snoop); return slavePort.sendTimingResp(pkt); } diff --git a/src/mem/packet_queue.hh b/src/mem/packet_queue.hh index 3ed3fae28..164ff6345 100644 --- a/src/mem/packet_queue.hh +++ b/src/mem/packet_queue.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 ARM Limited + * Copyright (c) 2012,2015 ARM Limited * All rights reserved. * * The license below extends only to copyright in the software and shall @@ -49,8 +49,7 @@ * Declaration of a simple PacketQueue that is associated with * a port on which it attempts to send packets according to the time * stamp given to them at insertion. The packet queue is responsible - * for the flow control of the port, but relies on the module - * notifying the queue when a transfer ends. + * for the flow control of the port. */ #include <list> @@ -71,28 +70,23 @@ class PacketQueue : public Drainable public: Tick tick; ///< The tick when the packet is ready to transmit PacketPtr pkt; ///< Pointer to the packet to transmit - bool sendAsSnoop; ///< Should it be sent as a snoop or not - DeferredPacket(Tick t, PacketPtr p, bool send_as_snoop) - : tick(t), pkt(p), sendAsSnoop(send_as_snoop) + DeferredPacket(Tick t, PacketPtr p) + : tick(t), pkt(p) {} }; typedef std::list<DeferredPacket> DeferredPacketList; - /** A list of outgoing timing response packets that haven't been - * serviced yet. */ + /** A list of outgoing packets. */ DeferredPacketList transmitList; /** The manager which is used for the event queue */ EventManager& em; - /** This function attempts to send deferred packets. Scheduled to - * be called in the future via SendEvent. */ + /** Used to schedule sending of deferred packets. */ void processSendEvent(); - /** - * Event used to call processSendEvent. - **/ + /** Event used to call processSendEvent. */ EventWrapper<PacketQueue, &PacketQueue::processSendEvent> sendEvent; /** If we need to drain, keep the drain manager around until we're done @@ -104,55 +98,28 @@ class PacketQueue : public Drainable /** Label to use for print request packets label stack. */ const std::string label; - /** Remember whether we're awaiting a retry from the bus. */ + /** Remember whether we're awaiting a retry. */ bool waitingOnRetry; /** Check whether we have a packet ready to go on the transmit list. */ bool deferredPacketReady() const { return !transmitList.empty() && transmitList.front().tick <= curTick(); } - Tick deferredPacketReadyTime() const - { return transmitList.empty() ? MaxTick : transmitList.front().tick; } - /** - * Attempt to send the packet at the head of the transmit - * list. Caller must guarantee that the list is non-empty and that - * the head packet is scheduled for curTick() (or earlier). Note - * that a subclass of the PacketQueue can override this method and - * thus change the behaviour (as done by the cache). + * Attempt to send a packet. Note that a subclass of the + * PacketQueue can override this method and thus change the + * behaviour (as done by the cache for the request queue). The + * default implementation sends the head of the transmit list. The + * caller must guarantee that the list is non-empty and that the + * head packet is scheduled for curTick() (or earlier). */ virtual void sendDeferredPacket(); /** - * Attempt to send the packet at the front of the transmit list, - * and set waitingOnRetry accordingly. The packet is temporarily - * taken off the list, but put back at the front if not - * successfully sent. 
+ * Send a packet using the appropriate method for the specific + * subclass (reuest, response or snoop response). */ - void trySendTiming(); - - /** - * - */ - virtual bool sendTiming(PacketPtr pkt, bool send_as_snoop) = 0; - - /** - * Based on the transmit list, or the provided time, schedule a - * send event if there are packets to send. If we are idle and - * asked to drain then do so. - * - * @param time an alternative time for the next send event - */ - void scheduleSend(Tick time = MaxTick); - - /** - * Simple ports are generally used as slave ports (i.e. the - * respond to requests) and thus do not expect to receive any - * range changes (as the neighbouring port has a master role and - * do not have any address ranges. A subclass can override the - * default behaviuor if needed. - */ - virtual void recvRangeChange() { } + virtual bool sendTiming(PacketPtr pkt) = 0; /** * Create a packet queue, linked to an event manager, and a label @@ -177,40 +144,56 @@ class PacketQueue : public Drainable */ virtual const std::string name() const = 0; + /** + * Get the size of the queue. + */ + size_t size() const { return transmitList.size(); } + + /** + * Get the next packet ready time. + */ + Tick deferredPacketReadyTime() const + { return transmitList.empty() ? MaxTick : transmitList.front().tick; } + + /** + * Check if a packets address exists in the queue. + */ + bool hasAddr(Addr addr) const; + /** Check the list of buffered packets against the supplied * functional request. */ bool checkFunctional(PacketPtr pkt); /** - * Schedule a send even if not already waiting for a retry. If the - * requested time is before an already scheduled send event it - * will be rescheduled. + * Schedule a send event if we are not already waiting for a + * retry. If the requested time is before an already scheduled + * send event, the event will be rescheduled. If MaxTick is + * passed, no event is scheduled. Instead, if we are idle and + * asked to drain then check and signal drained. * - * @param when + * @param when time to schedule an event */ void schedSendEvent(Tick when); /** - * Add a packet to the transmit list, and ensure that a - * processSendEvent is called in the future. + * Add a packet to the transmit list, and schedule a send event. * * @param pkt Packet to send * @param when Absolute time (in ticks) to send packet - * @param send_as_snoop Send the packet as a snoop or not */ - void schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop = false); + void schedSendTiming(PacketPtr pkt, Tick when); /** - * Used by a port to notify the queue that a retry was received - * and that the queue can proceed and retry sending the packet - * that caused the wait. + * Retry sending a packet from the queue. Note that this is not + * necessarily the same packet if something has been added with an + * earlier time stamp. */ void retry(); unsigned int drain(DrainManager *dm); }; -class MasterPacketQueue : public PacketQueue +class ReqPacketQueue : public PacketQueue { protected: @@ -220,7 +203,7 @@ class MasterPacketQueue : public PacketQueue public: /** - * Create a master packet queue, linked to an event manager, a + * Create a request packet queue, linked to an event manager, a * master port, and a label that will be used for functional print * request packets. 
* @@ -228,18 +211,49 @@ class MasterPacketQueue : public PacketQueue * @param _masterPort Master port used to send the packets * @param _label Label to push on the label stack for print request packets */ - MasterPacketQueue(EventManager& _em, MasterPort& _masterPort, - const std::string _label = "MasterPacketQueue"); + ReqPacketQueue(EventManager& _em, MasterPort& _masterPort, + const std::string _label = "ReqPacketQueue"); + + virtual ~ReqPacketQueue() { } + + const std::string name() const + { return masterPort.name() + "-" + label; } + + bool sendTiming(PacketPtr pkt); + +}; + +class SnoopRespPacketQueue : public PacketQueue +{ + + protected: + + MasterPort& masterPort; + + public: + + /** + * Create a snoop response packet queue, linked to an event + * manager, a master port, and a label that will be used for + * functional print request packets. + * + * @param _em Event manager used for scheduling this queue + * @param _masterPort Master port used to send the packets + * @param _label Label to push on the label stack for print request packets + */ + SnoopRespPacketQueue(EventManager& _em, MasterPort& _masterPort, + const std::string _label = "SnoopRespPacketQueue"); - virtual ~MasterPacketQueue() { } + virtual ~SnoopRespPacketQueue() { } const std::string name() const { return masterPort.name() + "-" + label; } - bool sendTiming(PacketPtr pkt, bool send_as_snoop); + bool sendTiming(PacketPtr pkt); + }; -class SlavePacketQueue : public PacketQueue +class RespPacketQueue : public PacketQueue { protected: @@ -249,7 +263,7 @@ class SlavePacketQueue : public PacketQueue public: /** - * Create a slave packet queue, linked to an event manager, a + * Create a response packet queue, linked to an event manager, a * slave port, and a label that will be used for functional print * request packets. 
* @@ -257,15 +271,15 @@ class SlavePacketQueue : public PacketQueue * @param _slavePort Slave port used to send the packets * @param _label Label to push on the label stack for print request packets */ - SlavePacketQueue(EventManager& _em, SlavePort& _slavePort, - const std::string _label = "SlavePacketQueue"); + RespPacketQueue(EventManager& _em, SlavePort& _slavePort, + const std::string _label = "RespPacketQueue"); - virtual ~SlavePacketQueue() { } + virtual ~RespPacketQueue() { } const std::string name() const { return slavePort.name() + "-" + label; } - bool sendTiming(PacketPtr pkt, bool send_as_snoop); + bool sendTiming(PacketPtr pkt); }; diff --git a/src/mem/port.cc b/src/mem/port.cc index 898f19c08..03b68814f 100644 --- a/src/mem/port.cc +++ b/src/mem/port.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 ARM Limited + * Copyright (c) 2012,2015 ARM Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -190,9 +190,9 @@ MasterPort::sendTimingSnoopResp(PacketPtr pkt) } void -MasterPort::sendRetry() +MasterPort::sendRetryResp() { - _slavePort->recvRetry(); + _slavePort->recvRespRetry(); } void @@ -261,7 +261,13 @@ SlavePort::sendTimingSnoopReq(PacketPtr pkt) } void -SlavePort::sendRetry() +SlavePort::sendRetryReq() { - _masterPort->recvRetry(); + _masterPort->recvReqRetry(); +} + +void +SlavePort::sendRetrySnoopResp() +{ + _masterPort->recvRetrySnoopResp(); } diff --git a/src/mem/port.hh b/src/mem/port.hh index 6b5e9530c..0d88441dc 100644 --- a/src/mem/port.hh +++ b/src/mem/port.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 ARM Limited + * Copyright (c) 2011-2012,2015 ARM Limited * All rights reserved * * The license below extends only to copyright in the software and shall @@ -214,7 +214,7 @@ class MasterPort : public BaseMasterPort * Attempt to send a timing request to the slave port by calling * its corresponding receive function. If the send does not * succeed, as indicated by the return value, then the sender must - * wait for a recvRetry at which point it can re-issue a + * wait for a recvReqRetry at which point it can re-issue a * sendTimingReq. * * @param pkt Packet to send. @@ -227,8 +227,8 @@ class MasterPort : public BaseMasterPort * Attempt to send a timing snoop response packet to the slave * port by calling its corresponding receive function. If the send * does not succeed, as indicated by the return value, then the - * sender must wait for a recvRetry at which point it can re-issue - * a sendTimingSnoopResp. + * sender must wait for a recvRetrySnoop at which point it can + * re-issue a sendTimingSnoopResp. * * @param pkt Packet to send. */ @@ -236,9 +236,11 @@ class MasterPort : public BaseMasterPort /** * Send a retry to the slave port that previously attempted a - * sendTimingResp to this master port and failed. + * sendTimingResp to this master port and failed. Note that this + * is virtual so that the "fake" snoop response port in the + * coherent crossbar can override the behaviour. */ - virtual void sendRetry(); + virtual void sendRetryResp(); /** * Determine if this master port is snooping or not. The default @@ -294,12 +296,21 @@ class MasterPort : public BaseMasterPort } /** - * Called by the slave port if sendTimingReq or - * sendTimingSnoopResp was called on this master port (causing - * recvTimingReq and recvTimingSnoopResp to be called on the - * slave port) and was unsuccesful. 
+ * Called by the slave port if sendTimingReq was called on this + * master port (causing recvTimingReq to be called on the slave + * port) and was unsuccesful. */ - virtual void recvRetry() = 0; + virtual void recvReqRetry() = 0; + + /** + * Called by the slave port if sendTimingSnoopResp was called on this + * master port (causing recvTimingSnoopResp to be called on the slave + * port) and was unsuccesful. + */ + virtual void recvRetrySnoopResp() + { + panic("%s was not expecting a snoop retry\n", name()); + } /** * Called to receive an address range change from the peer slave @@ -356,7 +367,7 @@ class SlavePort : public BaseSlavePort * Attempt to send a timing response to the master port by calling * its corresponding receive function. If the send does not * succeed, as indicated by the return value, then the sender must - * wait for a recvRetry at which point it can re-issue a + * wait for a recvRespRetry at which point it can re-issue a * sendTimingResp. * * @param pkt Packet to send. @@ -376,10 +387,15 @@ class SlavePort : public BaseSlavePort /** * Send a retry to the master port that previously attempted a - * sendTimingReq or sendTimingSnoopResp to this slave port and - * failed. + * sendTimingReq to this slave port and failed. + */ + void sendRetryReq(); + + /** + * Send a retry to the master port that previously attempted a + * sendTimingSnoopResp to this slave port and failed. */ - void sendRetry(); + void sendRetrySnoopResp(); /** * Find out if the peer master port is snooping or not. @@ -448,7 +464,7 @@ class SlavePort : public BaseSlavePort * slave port (causing recvTimingResp to be called on the master * port) and was unsuccesful. */ - virtual void recvRetry() = 0; + virtual void recvRespRetry() = 0; }; diff --git a/src/mem/qport.hh b/src/mem/qport.hh index 5406ef114..4ab05220c 100644 --- a/src/mem/qport.hh +++ b/src/mem/qport.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 ARM Limited + * Copyright (c) 2012,2015 ARM Limited * All rights reserved. * * The license below extends only to copyright in the software and shall @@ -61,12 +61,10 @@ class QueuedSlavePort : public SlavePort protected: - /** Packet queue used to store outgoing requests and responses. */ - SlavePacketQueue &queue; + /** Packet queue used to store outgoing responses. */ + RespPacketQueue &respQueue; - /** This function is notification that the device should attempt to send a - * packet again. */ - virtual void recvRetry() { queue.retry(); } + void recvRespRetry() { respQueue.retry(); } public: @@ -78,8 +76,8 @@ class QueuedSlavePort : public SlavePort * QueuePort constructor. */ QueuedSlavePort(const std::string& name, MemObject* owner, - SlavePacketQueue &queue, PortID id = InvalidPortID) : - SlavePort(name, owner, id), queue(queue) + RespPacketQueue &resp_queue, PortID id = InvalidPortID) : + SlavePort(name, owner, id), respQueue(resp_queue) { } virtual ~QueuedSlavePort() { } @@ -91,39 +89,53 @@ class QueuedSlavePort : public SlavePort * @param when Absolute time (in ticks) to send packet */ void schedTimingResp(PacketPtr pkt, Tick when) - { queue.schedSendTiming(pkt, when); } + { respQueue.schedSendTiming(pkt, when); } /** Check the list of buffered packets against the supplied * functional request. 
diff --git a/src/mem/qport.hh b/src/mem/qport.hh
index 5406ef114..4ab05220c 100644
--- a/src/mem/qport.hh
+++ b/src/mem/qport.hh
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012,2015 ARM Limited
  * All rights reserved.
  *
  * The license below extends only to copyright in the software and shall
@@ -61,12 +61,10 @@ class QueuedSlavePort : public SlavePort
 
   protected:
 
-    /** Packet queue used to store outgoing requests and responses. */
-    SlavePacketQueue &queue;
+    /** Packet queue used to store outgoing responses. */
+    RespPacketQueue &respQueue;
 
-    /** This function is notification that the device should attempt to send a
-     *  packet again. */
-    virtual void recvRetry() { queue.retry(); }
+    void recvRespRetry() { respQueue.retry(); }
 
   public:
 
@@ -78,8 +76,8 @@ class QueuedSlavePort : public SlavePort
      * QueuePort constructor.
      */
     QueuedSlavePort(const std::string& name, MemObject* owner,
-                    SlavePacketQueue &queue, PortID id = InvalidPortID) :
-        SlavePort(name, owner, id), queue(queue)
+                    RespPacketQueue &resp_queue, PortID id = InvalidPortID) :
+        SlavePort(name, owner, id), respQueue(resp_queue)
     { }
 
     virtual ~QueuedSlavePort() { }
@@ -91,39 +89,53 @@ class QueuedSlavePort : public SlavePort
      * @param when Absolute time (in ticks) to send packet
      */
     void schedTimingResp(PacketPtr pkt, Tick when)
-    { queue.schedSendTiming(pkt, when); }
+    { respQueue.schedSendTiming(pkt, when); }
 
     /** Check the list of buffered packets against the supplied
      * functional request. */
-    bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
+    bool checkFunctional(PacketPtr pkt)
+    { return respQueue.checkFunctional(pkt); }
 
-    unsigned int drain(DrainManager *dm) { return queue.drain(dm); }
+    unsigned int drain(DrainManager *dm) { return respQueue.drain(dm); }
 };
 
+/**
+ * The QueuedMasterPort combines two queues, a request queue and a
+ * snoop response queue, that both share the same port. The flow
+ * control for requests and snoop responses are completely
+ * independent, and so each queue manages its own flow control
+ * (retries).
+ */
 class QueuedMasterPort : public MasterPort
 {
 
   protected:
 
-    /** Packet queue used to store outgoing requests and responses. */
-    MasterPacketQueue &queue;
+    /** Packet queue used to store outgoing requests. */
+    ReqPacketQueue &reqQueue;
+
+    /** Packet queue used to store outgoing snoop responses. */
+    SnoopRespPacketQueue &snoopRespQueue;
+
+    void recvReqRetry() { reqQueue.retry(); }
 
-    /** This function is notification that the device should attempt to send a
-     *  packet again. */
-    virtual void recvRetry() { queue.retry(); }
+    void recvRetrySnoopResp() { snoopRespQueue.retry(); }
 
   public:
 
     /**
      * Create a QueuedPort with a given name, owner, and a supplied
-     * implementation of a packet queue. The external definition of
-     * the queue enables e.g. the cache to implement a specific queue
+     * implementation of two packet queues. The external definition of
+     * the queues enables e.g. the cache to implement a specific queue
      * behaviuor in a subclass, and provide the latter to the
      * QueuePort constructor.
      */
     QueuedMasterPort(const std::string& name, MemObject* owner,
-                     MasterPacketQueue &queue, PortID id = InvalidPortID) :
-        MasterPort(name, owner, id), queue(queue)
+                     ReqPacketQueue &req_queue,
+                     SnoopRespPacketQueue &snoop_resp_queue,
+                     PortID id = InvalidPortID) :
+        MasterPort(name, owner, id), reqQueue(req_queue),
+        snoopRespQueue(snoop_resp_queue)
     { }
 
     virtual ~QueuedMasterPort() { }
@@ -135,7 +147,7 @@ class QueuedMasterPort : public MasterPort
      * @param when Absolute time (in ticks) to send packet
      */
     void schedTimingReq(PacketPtr pkt, Tick when)
-    { queue.schedSendTiming(pkt, when); }
+    { reqQueue.schedSendTiming(pkt, when); }
 
     /**
      * Schedule the sending of a timing snoop response.
@@ -144,13 +156,18 @@ class QueuedMasterPort : public MasterPort
      * @param when Absolute time (in ticks) to send packet
      */
     void schedTimingSnoopResp(PacketPtr pkt, Tick when)
-    { queue.schedSendTiming(pkt, when, true); }
+    { snoopRespQueue.schedSendTiming(pkt, when); }
 
     /** Check the list of buffered packets against the supplied
      * functional request. */
-    bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
-
-    unsigned int drain(DrainManager *dm) { return queue.drain(dm); }
+    bool checkFunctional(PacketPtr pkt)
+    {
+        return reqQueue.checkFunctional(pkt) ||
+            snoopRespQueue.checkFunctional(pkt);
+    }
+
+    unsigned int drain(DrainManager *dm)
+    { return reqQueue.drain(dm) + snoopRespQueue.drain(dm); }
 };
 
 #endif // __MEM_QPORT_HH__
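With qport.hh reworked as above, a QueuedMasterPort owner now supplies two queues instead of one. The sketch below is not from the patch (MyQueuedPort is an invented name), but it shows the wiring that the AbstractController and RubyPort changes further down follow: declare the queues next to the port and hand them to both constructors.

    // Sketch of a QueuedMasterPort owner after the split (names invented).
    // The base class only binds references, so passing it members that are
    // initialised later in the same constructor is fine; this is the same
    // pattern the AbstractController and RubyPort changes below use.
    #include "mem/qport.hh"

    class MyQueuedPort : public QueuedMasterPort
    {
      public:
        MyQueuedPort(const std::string& name, MemObject& owner)
            : QueuedMasterPort(name, &owner, reqQueue, snoopRespQueue),
              reqQueue(owner, *this), snoopRespQueue(owner, *this) { }

        // requests and snoop responses now go through separate queues:
        //   schedTimingReq(pkt, when)        -> reqQueue
        //   schedTimingSnoopResp(pkt, when)  -> snoopRespQueue

      protected:
        bool recvTimingResp(PacketPtr pkt) { return true; } // sink responses

      private:
        ReqPacketQueue reqQueue;
        SnoopRespPacketQueue snoopRespQueue;
    };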
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index 6bcbfbcbf..a1d6ab83e 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -327,7 +327,9 @@ AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
 AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                            AbstractController *_controller,
                                            const std::string &_label)
-    : QueuedMasterPort(_name, _controller, _queue),
-      _queue(*_controller, *this, _label), controller(_controller)
+    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
+      reqQueue(*_controller, *this, _label),
+      snoopRespQueue(*_controller, *this, _label),
+      controller(_controller)
 {
 }
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index 45d355b3e..f8970fb59 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -181,8 +181,9 @@ class AbstractController : public MemObject, public Consumer
     class MemoryPort : public QueuedMasterPort
     {
       private:
-        // Packet queue used to store outgoing requests and responses.
-        MasterPacketQueue _queue;
+        // Packet queues used to store outgoing requests and snoop responses.
+        ReqPacketQueue reqQueue;
+        SnoopRespPacketQueue snoopRespQueue;
 
         // Controller that operates this port.
         AbstractController *controller;
diff --git a/src/mem/ruby/structures/RubyMemoryControl.hh b/src/mem/ruby/structures/RubyMemoryControl.hh
index 6b1ec1702..78bc74ad5 100644
--- a/src/mem/ruby/structures/RubyMemoryControl.hh
+++ b/src/mem/ruby/structures/RubyMemoryControl.hh
@@ -109,7 +109,7 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
     // flow control for the responses being sent back
     class MemoryPort : public QueuedSlavePort
     {
-        SlavePacketQueue queue;
+        RespPacketQueue queue;
         RubyMemoryControl& memory;
 
       public:
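On the slave side only the response queue remains, as the RubyMemoryControl::MemoryPort declaration above shows. A hypothetical minimal owner port (MyRespPort is invented, and the immediate one-tick response is a deliberate simplification) would be wired the same way:

    // Sketch of the slave side after the split: a QueuedSlavePort needs a
    // single RespPacketQueue, and the inherited recvRespRetry() drains it.
    #include "mem/qport.hh"
    #include "sim/core.hh"   // curTick()

    class MyRespPort : public QueuedSlavePort
    {
      public:
        MyRespPort(const std::string& name, MemObject& owner)
            : QueuedSlavePort(name, &owner, queueImpl),
              queueImpl(owner, *this) { }

      protected:
        bool recvTimingReq(PacketPtr pkt)
        {
            pkt->makeTimingResponse();            // simplified memory-like behaviour
            schedTimingResp(pkt, curTick() + 1);  // queue handles the flow control
            return true;
        }

        Tick recvAtomic(PacketPtr pkt) { return 0; }
        void recvFunctional(PacketPtr pkt) { }
        AddrRangeList getAddrRanges() const { return AddrRangeList(); }

      private:
        RespPacketQueue queueImpl;
    };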
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
index 47a9b13aa..43ef37f08 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -138,7 +138,7 @@ DMASequencer::ruby_hit_callback(PacketPtr pkt)
         retry = false;
         DPRINTF(RubyDma,"Sequencer may now be free. SendRetry to port %s\n",
                 slave_port.name());
-        slave_port.sendRetry();
+        slave_port.sendRetryReq();
     }
 
     testDrainComplete();
diff --git a/src/mem/ruby/system/DMASequencer.hh b/src/mem/ruby/system/DMASequencer.hh
index 7b0fe58c9..bcf586acf 100644
--- a/src/mem/ruby/system/DMASequencer.hh
+++ b/src/mem/ruby/system/DMASequencer.hh
@@ -66,7 +66,7 @@ class DMASequencer : public MemObject
     class MemSlavePort : public QueuedSlavePort
     {
       private:
-        SlavePacketQueue queue;
+        RespPacketQueue queue;
 
         RubySystem* ruby_system;
         bool access_backing_store;
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index b419c491c..dba71952e 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -136,7 +136,8 @@ RubyPort::getSlavePort(const std::string &if_name, PortID idx)
 
 RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                            RubyPort *_port)
-    : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
+    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
+      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
 {
     DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
 }
@@ -150,7 +151,8 @@ RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
 
 RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                            RubyPort *_port)
-    : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
+    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
+      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
 {
     DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
 }
@@ -374,7 +376,7 @@ RubyPort::ruby_hit_callback(PacketPtr pkt)
 
         DPRINTF(RubyPort, "Sequencer may now be free. SendRetry to port %s\n",
                 (*i)->name());
-        (*i)->sendRetry();
+        (*i)->sendRetryReq();
     }
 }
 
diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh
index 28a416663..2fb31ca09 100644
--- a/src/mem/ruby/system/RubyPort.hh
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -60,7 +60,8 @@ class RubyPort : public MemObject
     class MemMasterPort : public QueuedMasterPort
    {
       private:
-        MasterPacketQueue queue;
+        ReqPacketQueue reqQueue;
+        SnoopRespPacketQueue snoopRespQueue;
 
       public:
        MemMasterPort(const std::string &_name, RubyPort *_port);
@@ -73,7 +74,7 @@ class RubyPort : public MemObject
     class MemSlavePort : public QueuedSlavePort
     {
       private:
-        SlavePacketQueue queue;
+        RespPacketQueue queue;
 
         RubySystem* ruby_system;
         bool access_backing_store;
@@ -101,7 +102,8 @@ class RubyPort : public MemObject
     class PioMasterPort : public QueuedMasterPort
     {
       private:
-        MasterPacketQueue queue;
+        ReqPacketQueue reqQueue;
+        SnoopRespPacketQueue snoopRespQueue;
 
       public:
         PioMasterPort(const std::string &_name, RubyPort *_port);
@@ -114,7 +116,7 @@ class RubyPort : public MemObject
     class PioSlavePort : public QueuedSlavePort
     {
       private:
-        SlavePacketQueue queue;
+        RespPacketQueue queue;
 
       public:
         PioSlavePort(const std::string &_name, RubyPort *_port);
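RubyPort::ruby_hit_callback() and DMASequencer::ruby_hit_callback() above now use SlavePort::sendRetryReq() to wake up masters whose requests were rejected earlier. The general shape of that deferred-retry pattern, with invented names (MySequencer, waiting, busy; not code from the patch), is roughly:

    // Sketch of the deferred-retry pattern: remember which slave ports had
    // to reject a request, then call the renamed sendRetryReq() on each
    // once the resource frees up again.
    #include <vector>
    #include "mem/port.hh"

    class MySequencer
    {
      public:
        bool handleRequest(SlavePort& port, PacketPtr pkt)
        {
            if (busy) {
                waiting.push_back(&port);   // NACK: the master keeps the packet
                return false;
            }
            busy = true;
            // ... start servicing pkt ...
            return true;
        }

        void serviceDone()
        {
            busy = false;
            for (auto p : waiting)
                p->sendRetryReq();          // was sendRetry() before the patch
            waiting.clear();
        }

      private:
        bool busy = false;
        std::vector<SlavePort*> waiting;
    };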
diff --git a/src/mem/simple_mem.cc b/src/mem/simple_mem.cc
index bf89e58fd..52fd753c8 100644
--- a/src/mem/simple_mem.cc
+++ b/src/mem/simple_mem.cc
@@ -178,7 +178,7 @@ SimpleMemory::release()
     isBusy = false;
     if (retryReq) {
         retryReq = false;
-        port.sendRetry();
+        port.sendRetryReq();
     }
 }
 
@@ -216,7 +216,7 @@ SimpleMemory::getLatency() const
 }
 
 void
-SimpleMemory::recvRetry()
+SimpleMemory::recvRespRetry()
 {
     assert(retryResp);
 
@@ -284,9 +284,9 @@ SimpleMemory::MemoryPort::recvTimingReq(PacketPtr pkt)
 }
 
 void
-SimpleMemory::MemoryPort::recvRetry()
+SimpleMemory::MemoryPort::recvRespRetry()
 {
-    memory.recvRetry();
+    memory.recvRespRetry();
 }
 
 SimpleMemory*
diff --git a/src/mem/simple_mem.hh b/src/mem/simple_mem.hh
index ba4b8bdf1..f69633690 100644
--- a/src/mem/simple_mem.hh
+++ b/src/mem/simple_mem.hh
@@ -101,7 +101,7 @@ class SimpleMemory : public AbstractMemory
 
         bool recvTimingReq(PacketPtr pkt);
 
-        void recvRetry();
+        void recvRespRetry();
 
         AddrRangeList getAddrRanges() const;
 
@@ -205,7 +205,7 @@ class SimpleMemory : public AbstractMemory
 
     bool recvTimingReq(PacketPtr pkt);
 
-    void recvRetry();
+    void recvRespRetry();
 
 };
 
diff --git a/src/mem/tport.cc b/src/mem/tport.cc
index 4408b59ba..aa783ada0 100644
--- a/src/mem/tport.cc
+++ b/src/mem/tport.cc
@@ -53,7 +53,7 @@ SimpleTimingPort::SimpleTimingPort(const std::string& _name,
 void
 SimpleTimingPort::recvFunctional(PacketPtr pkt)
 {
-    if (!queue.checkFunctional(pkt)) {
+    if (!respQueue.checkFunctional(pkt)) {
         // do an atomic access and throw away the returned latency
         recvAtomic(pkt);
     }
diff --git a/src/mem/tport.hh b/src/mem/tport.hh
index 5e80f4fab..166a23125 100644
--- a/src/mem/tport.hh
+++ b/src/mem/tport.hh
@@ -68,7 +68,7 @@ class SimpleTimingPort : public QueuedSlavePort
      * name used in the QueuedSlavePort. Access is provided through
      * the queue reference in the base class.
      */
-    SlavePacketQueue queueImpl;
+    RespPacketQueue queueImpl;
 
   protected:
 
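SimpleMemory above illustrates the response half of the handshake: a sendTimingResp() that returns false parks the packet until the master calls back with recvRespRetry(). A stripped-down hypothetical port doing the same (MyMemPort, sendResponse and pendingResp are invented names) might be:

    // Sketch of the response-retry handshake on a raw SlavePort: a rejected
    // sendTimingResp() is parked and re-sent from recvRespRetry().
    #include "mem/port.hh"

    class MyMemPort : public SlavePort
    {
      public:
        MyMemPort(const std::string& name, MemObject* owner)
            : SlavePort(name, owner), pendingResp(nullptr) { }

        // called by the owner once a response packet is ready to go out
        void sendResponse(PacketPtr pkt)
        {
            if (!sendTimingResp(pkt))
                pendingResp = pkt;          // master busy, wait for the retry
        }

      protected:
        void recvRespRetry()                // response half of the old recvRetry()
        {
            PacketPtr pkt = pendingResp;
            pendingResp = nullptr;
            if (pkt && !sendTimingResp(pkt))
                pendingResp = pkt;          // still blocked
        }

        bool recvTimingReq(PacketPtr pkt) { return true; }  // owner handles it
        Tick recvAtomic(PacketPtr pkt) { return 0; }
        void recvFunctional(PacketPtr pkt) { }
        AddrRangeList getAddrRanges() const { return AddrRangeList(); }

      private:
        PacketPtr pendingResp;
    };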
diff --git a/src/mem/xbar.cc b/src/mem/xbar.cc
index e98b10060..7ac937177 100644
--- a/src/mem/xbar.cc
+++ b/src/mem/xbar.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2014 ARM Limited
+ * Copyright (c) 2011-2015 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
@@ -271,7 +271,7 @@ BaseXBar::Layer<SrcType,DstType>::retryWaiting()
 
     // tell the port to retry, which in some cases ends up calling the
    // layer again
-    retryingPort->sendRetry();
+    sendRetry(retryingPort);
 
     // If the layer is still in the retry state, sendTiming wasn't
     // called in zero time (e.g. the cache does this), burn a cycle
diff --git a/src/mem/xbar.hh b/src/mem/xbar.hh
index 81b16c19d..f51b08da2 100644
--- a/src/mem/xbar.hh
+++ b/src/mem/xbar.hh
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2014 ARM Limited
+ * Copyright (c) 2011-2015 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
@@ -174,6 +174,16 @@ class BaseXBar : public MemObject
          */
         void regStats();
 
+      protected:
+
+        /**
+         * Sending the actual retry, in a manner specific to the
+         * individual layers. Note that for a MasterPort, there is
+         * both a RequestLayer and a SnoopResponseLayer using the same
+         * port, but using different functions for the flow control.
+         */
+        virtual void sendRetry(SrcType* retry_port) = 0;
+
       private:
 
         /** The destination port this layer converges at. */
@@ -241,6 +251,64 @@ class BaseXBar : public MemObject
 
     };
 
+    class ReqLayer : public Layer<SlavePort,MasterPort>
+    {
+      public:
+        /**
+         * Create a request layer and give it a name.
+         *
+         * @param _port destination port the layer converges at
+         * @param _xbar the crossbar this layer belongs to
+         * @param _name the layer's name
+         */
+        ReqLayer(MasterPort& _port, BaseXBar& _xbar, const std::string& _name) :
+            Layer(_port, _xbar, _name) {}
+
+      protected:
+
+        void sendRetry(SlavePort* retry_port)
+        { retry_port->sendRetryReq(); }
+    };
+
+    class RespLayer : public Layer<MasterPort,SlavePort>
+    {
+      public:
+        /**
+         * Create a response layer and give it a name.
+         *
+         * @param _port destination port the layer converges at
+         * @param _xbar the crossbar this layer belongs to
+         * @param _name the layer's name
+         */
+        RespLayer(SlavePort& _port, BaseXBar& _xbar, const std::string& _name) :
+            Layer(_port, _xbar, _name) {}
+
+      protected:
+
+        void sendRetry(MasterPort* retry_port)
+        { retry_port->sendRetryResp(); }
+    };
+
+    class SnoopRespLayer : public Layer<SlavePort,MasterPort>
+    {
+      public:
+        /**
+         * Create a snoop response layer and give it a name.
+         *
+         * @param _port destination port the layer converges at
+         * @param _xbar the crossbar this layer belongs to
+         * @param _name the layer's name
+         */
+        SnoopRespLayer(MasterPort& _port, BaseXBar& _xbar,
+                       const std::string& _name) :
+            Layer(_port, _xbar, _name) {}
+
+      protected:
+
+        void sendRetry(SlavePort* retry_port)
+        { retry_port->sendRetrySnoopResp(); }
+    };
+
     /** cycles of overhead per transaction */
     const Cycles headerCycles;
     /** the width of the xbar in bytes */
diff --git a/src/sim/system.hh b/src/sim/system.hh
index 1db28f212..9ec349a47 100644
--- a/src/sim/system.hh
+++ b/src/sim/system.hh
@@ -99,7 +99,7 @@ class System : public MemObject
         { }
         bool recvTimingResp(PacketPtr pkt)
         { panic("SystemPort does not receive timing!\n"); return false; }
-        void recvRetry()
+        void recvReqRetry()
         { panic("SystemPort does not expect retry!\n"); }
     };