-rw-r--r--   src/arch/arm/ArmTLB.py           |  3
-rw-r--r--   src/arch/arm/table_walker.cc     |  3
-rw-r--r--   src/arch/arm/table_walker.hh     |  5
-rw-r--r--   src/arch/x86/pagetable_walker.cc | 92
-rw-r--r--   src/cpu/o3/fetch_impl.hh         |  2
-rw-r--r--   src/cpu/o3/lsq_unit_impl.hh      |  2
-rw-r--r--   src/cpu/simple/timing.cc         | 62
-rw-r--r--   src/dev/Device.py                |  5
-rw-r--r--   src/dev/copy_engine.cc           |  3
-rw-r--r--   src/dev/dma_device.cc            | 52
-rw-r--r--   src/dev/dma_device.hh            | 16
-rw-r--r--   src/mem/cache/cache_impl.hh      | 14
-rw-r--r--   src/mem/packet.cc                |  2
-rw-r--r--   src/mem/packet.hh                | 23

14 files changed, 75 insertions, 209 deletions
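
The change removes the NACK/backoff machinery from the memory system: responses can no longer come back nacked, so the per-device backoff parameters, timers and events disappear and DMA-style ports are built from just a device and a system. As a rough illustration only (not part of the patch), a DMA-capable device after this change needs nothing beyond the two-argument construction; the class name below is made up.

    // Illustrative sketch; "ExampleDmaDevice" is not a real gem5 class.
    #include "dev/dma_device.hh"

    class ExampleDmaDevice : public DmaDevice
    {
      public:
        ExampleDmaDevice(const DmaDeviceParams *p)
            : DmaDevice(p)   // the base class now constructs its port as
                             // DmaPort(this, sys); no min/max backoff ticks
                             // are plumbed through from the config any more
        { }
    };
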
diff --git a/src/arch/arm/ArmTLB.py b/src/arch/arm/ArmTLB.py
index 8599fa75f..9572d2091 100644
--- a/src/arch/arm/ArmTLB.py
+++ b/src/arch/arm/ArmTLB.py
@@ -47,9 +47,6 @@ class ArmTableWalker(MemObject):
     cxx_class = 'ArmISA::TableWalker'
     port = MasterPort("Port for TableWalker to do walk the translation with")
     sys = Param.System(Parent.any, "system object parameter")
-    min_backoff = Param.Tick(0, "Minimum backoff delay after failed send")
-    max_backoff = Param.Tick(100000, "Minimum backoff delay after failed send")
-
 
 class ArmTLB(SimObject):
     type = 'ArmTLB'
diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc
index 7dbe92d9b..ea71e6f1c 100644
--- a/src/arch/arm/table_walker.cc
+++ b/src/arch/arm/table_walker.cc
@@ -51,8 +51,7 @@ using namespace ArmISA;
 
 TableWalker::TableWalker(const Params *p)
-    : MemObject(p), port(this, params()->sys, params()->min_backoff,
-                         params()->max_backoff), drainEvent(NULL),
+    : MemObject(p), port(this, params()->sys), drainEvent(NULL),
       tlb(NULL), currState(NULL), pending(false),
       masterId(p->sys->getMasterId(name())),
       doL1DescEvent(this), doL2DescEvent(this), doProcessEvent(this)
diff --git a/src/arch/arm/table_walker.hh b/src/arch/arm/table_walker.hh
index 1b95182c8..b6fee66ff 100644
--- a/src/arch/arm/table_walker.hh
+++ b/src/arch/arm/table_walker.hh
@@ -287,9 +287,8 @@ class TableWalker : public MemObject
          * A snooping DMA port merely calls the construtor of the DMA
          * port.
          */
-        SnoopingDmaPort(MemObject *dev, System *s, Tick min_backoff,
-                        Tick max_backoff) :
-            DmaPort(dev, s, min_backoff, max_backoff)
+        SnoopingDmaPort(MemObject *dev, System *s) :
+            DmaPort(dev, s)
         { }
     };
diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc
index b6e6c33f4..46d608ace 100644
--- a/src/arch/x86/pagetable_walker.cc
+++ b/src/arch/x86/pagetable_walker.cc
@@ -570,63 +570,49 @@ bool
 Walker::WalkerState::recvPacket(PacketPtr pkt)
 {
     assert(pkt->isResponse());
-    if (!pkt->wasNacked()) {
-        assert(inflight);
-        assert(state == Waiting);
-        assert(!read);
-        inflight--;
-        if (pkt->isRead()) {
-            state = nextState;
-            nextState = Ready;
-            PacketPtr write = NULL;
-            read = pkt;
-            timingFault = stepWalk(write);
-            state = Waiting;
-            assert(timingFault == NoFault || read == NULL);
-            if (write) {
-                writes.push_back(write);
-            }
-            sendPackets();
-        } else {
-            sendPackets();
-        }
-        if (inflight == 0 && read == NULL && writes.size() == 0) {
-            state = Ready;
-            nextState = Waiting;
-            if (timingFault == NoFault) {
-                /*
-                 * Finish the translation. Now that we now the right entry is
-                 * in the TLB, this should work with no memory accesses.
-                 * There could be new faults unrelated to the table walk like
-                 * permissions violations, so we'll need the return value as
-                 * well.
-                 */
-                bool delayedResponse;
-                Fault fault = walker->tlb->translate(req, tc, NULL, mode,
-                                                     delayedResponse, true);
-                assert(!delayedResponse);
-                // Let the CPU continue.
-                translation->finish(fault, req, tc, mode);
-            } else {
-                // There was a fault during the walk. Let the CPU know.
-                translation->finish(timingFault, req, tc, mode);
-            }
-            return true;
+    assert(inflight);
+    assert(state == Waiting);
+    assert(!read);
+    inflight--;
+    if (pkt->isRead()) {
+        state = nextState;
+        nextState = Ready;
+        PacketPtr write = NULL;
+        read = pkt;
+        timingFault = stepWalk(write);
+        state = Waiting;
+        assert(timingFault == NoFault || read == NULL);
+        if (write) {
+            writes.push_back(write);
         }
+        sendPackets();
     } else {
-        DPRINTF(PageTableWalker, "Request was nacked. Entering retry state\n");
-        pkt->reinitNacked();
-        if (!walker->sendTiming(this, pkt)) {
-            inflight--;
-            retrying = true;
-            if (pkt->isWrite()) {
-                writes.push_back(pkt);
-            } else {
-                assert(!read);
-                read = pkt;
-            }
+        sendPackets();
+    }
+    if (inflight == 0 && read == NULL && writes.size() == 0) {
+        state = Ready;
+        nextState = Waiting;
+        if (timingFault == NoFault) {
+            /*
+             * Finish the translation. Now that we now the right entry is
+             * in the TLB, this should work with no memory accesses.
+             * There could be new faults unrelated to the table walk like
+             * permissions violations, so we'll need the return value as
+             * well.
+             */
+            bool delayedResponse;
+            Fault fault = walker->tlb->translate(req, tc, NULL, mode,
+                                                 delayedResponse, true);
+            assert(!delayedResponse);
+            // Let the CPU continue.
+            translation->finish(fault, req, tc, mode);
+        } else {
+            // There was a fault during the walk. Let the CPU know.
+            translation->finish(timingFault, req, tc, mode);
         }
+        return true;
     }
+    return false;
 }
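
With the nacked-response branch gone, Walker::WalkerState::recvPacket above only ever sees genuine responses; a request that cannot be accepted by the peer is simply re-sent once the peer signals a retry. A minimal, self-contained sketch of that retry-only send pattern follows; the class and member names are illustrative and none of this is gem5 code.

    #include <deque>
    #include <functional>

    struct Pkt { };                               // stand-in for a packet

    class RetrySender
    {
        std::deque<Pkt *> queue;                  // packets not yet accepted
        bool waitingRetry = false;                // peer said "try again later"
        std::function<bool(Pkt *)> sendTiming;    // returns false if rejected

      public:
        explicit RetrySender(std::function<bool(Pkt *)> send)
            : sendTiming(std::move(send)) { }

        void enqueue(Pkt *p) { queue.push_back(p); trySend(); }

        // send until the peer rejects a packet or the queue drains
        void trySend()
        {
            while (!waitingRetry && !queue.empty()) {
                if (sendTiming(queue.front()))
                    queue.pop_front();            // accepted by the peer
                else
                    waitingRetry = true;          // park until recvRetry()
            }
        }

        // called by the peer once it can accept packets again
        void recvRetry()
        {
            waitingRetry = false;
            trySend();
        }
    };
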
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index 81d70bd61..caafa3fe3 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -363,8 +363,6 @@ DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
 
     DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n", tid);
 
-    assert(!pkt->wasNacked());
-
     // Only change the status if it's still waiting on the icache access
     // to return.
     if (fetchStatus[tid] != IcacheWaitResponse ||
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index a878b1540..7c98b99fb 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -95,8 +95,6 @@ LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
 
     //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
 
-    assert(!pkt->wasNacked());
-
     // If this is a split access, wait until all packets are received.
     if (TheISA::HasUnalignedMemAcc && !state->complete()) {
         delete pkt->req;
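
The LSQ context above still waits until every packet of a split access has arrived (state->complete()) before completing the access. A tiny sketch of that kind of completion tracking, with hypothetical names, just to make the bookkeeping explicit:

    // Hypothetical helper, not gem5 code: count the pieces of a split access
    // and report completion once every packet has been received.
    class SplitAccessState
    {
        int outstanding;                  // packets still in flight

      public:
        explicit SplitAccessState(int pieces) : outstanding(pieces) { }

        // record one returning packet; true when the access is whole
        bool recordPacket() { return --outstanding == 0; }

        bool complete() const { return outstanding == 0; }
    };
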
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 6a9fe7efc..9022845ce 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -719,25 +719,14 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
 bool
 TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
 {
-    if (!pkt->wasNacked()) {
-        DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
-        // delay processing of returned data until next CPU clock edge
-        Tick next_tick = cpu->nextCycle(curTick());
-
-        if (next_tick == curTick())
-            cpu->completeIfetch(pkt);
-        else
-            tickEvent.schedule(pkt, next_tick);
+    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
+    // delay processing of returned data until next CPU clock edge
+    Tick next_tick = cpu->nextCycle();
 
-        return true;
-    } else {
-        assert(cpu->_status == IcacheWaitResponse);
-        pkt->reinitNacked();
-        if (!sendTimingReq(pkt)) {
-            cpu->_status = IcacheRetry;
-            cpu->ifetch_pkt = pkt;
-        }
-    }
+    if (next_tick == curTick())
+        cpu->completeIfetch(pkt);
+    else
+        tickEvent.schedule(pkt, next_tick);
 
     return true;
 }
@@ -839,32 +828,21 @@ TimingSimpleCPU::completeDrain()
 bool
 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
 {
-    if (!pkt->wasNacked()) {
-        // delay processing of returned data until next CPU clock edge
-        Tick next_tick = cpu->nextCycle(curTick());
+    // delay processing of returned data until next CPU clock edge
+    Tick next_tick = cpu->nextCycle();
 
-        if (next_tick == curTick()) {
-            cpu->completeDataAccess(pkt);
+    if (next_tick == curTick()) {
+        cpu->completeDataAccess(pkt);
+    } else {
+        if (!tickEvent.scheduled()) {
+            tickEvent.schedule(pkt, next_tick);
         } else {
-            if (!tickEvent.scheduled()) {
-                tickEvent.schedule(pkt, next_tick);
-            } else {
-                // In the case of a split transaction and a cache that is
-                // faster than a CPU we could get two responses before
-                // next_tick expires
-                if (!retryEvent.scheduled())
-                    cpu->schedule(retryEvent, next_tick);
-                return false;
-            }
-        }
-
-        return true;
-    } else {
-        assert(cpu->_status == DcacheWaitResponse);
-        pkt->reinitNacked();
-        if (!sendTimingReq(pkt)) {
-            cpu->_status = DcacheRetry;
-            cpu->dcache_pkt = pkt;
+            // In the case of a split transaction and a cache that is
+            // faster than a CPU we could get two responses before
+            // next_tick expires
+            if (!retryEvent.scheduled())
+                cpu->schedule(retryEvent, next_tick);
+            return false;
         }
     }
diff --git a/src/dev/Device.py b/src/dev/Device.py
index 60c21df91..b1a4f69bc 100644
--- a/src/dev/Device.py
+++ b/src/dev/Device.py
@@ -46,11 +46,6 @@ class DmaDevice(PioDevice):
     type = 'DmaDevice'
     abstract = True
     dma = MasterPort("DMA port")
-    min_backoff_delay = Param.Latency('4ns',
-        "min time between a nack packet being received and the next request made by the device")
-    max_backoff_delay = Param.Latency('10us',
-        "max time between a nack packet being received and the next request made by the device")
-
 
 class IsaFake(BasicPioDevice):
diff --git a/src/dev/copy_engine.cc b/src/dev/copy_engine.cc
index bb15abab6..77cc735a9 100644
--- a/src/dev/copy_engine.cc
+++ b/src/dev/copy_engine.cc
@@ -78,8 +78,7 @@ CopyEngine::CopyEngine(const Params *p)
 
 CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
-    : cePort(_ce, _ce->sys, _ce->params()->min_backoff_delay,
-             _ce->params()->max_backoff_delay),
+    : cePort(_ce, _ce->sys),
       ce(_ce), channelId(cid), busy(false), underReset(false),
       refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
      latAfterCompletion(ce->params()->latAfterCompletion),
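
TimingSimpleCPU now unconditionally defers the returned packet to the next CPU clock edge via nextCycle(). The idea behind that alignment is sketched below under the assumption of a fixed clock period; this is a free-standing approximation, not the gem5 implementation of nextCycle().

    #include <cassert>
    #include <cstdint>

    using Tick = uint64_t;

    // First tick at or after 'now' that falls on an edge of a clock with the
    // given period (in ticks).
    Tick nextEdge(Tick now, Tick period)
    {
        assert(period != 0);
        Tick offset = now % period;
        return offset == 0 ? now : now + (period - offset);
    }

    // e.g. with a 500-tick period, a response arriving at tick 1600 is
    // handled at tick 2000, while one arriving exactly at 2000 is handled
    // immediately, mirroring the next_tick == curTick() check above.
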
diff --git a/src/dev/dma_device.cc b/src/dev/dma_device.cc
index 5a2f52df1..80253129f 100644
--- a/src/dev/dma_device.cc
+++ b/src/dev/dma_device.cc
@@ -47,36 +47,18 @@
 #include "dev/dma_device.hh"
 #include "sim/system.hh"
 
-DmaPort::DmaPort(MemObject *dev, System *s, Tick min_backoff, Tick max_backoff)
+DmaPort::DmaPort(MemObject *dev, System *s)
     : MasterPort(dev->name() + ".dma", dev), device(dev), sys(s),
       masterId(s->getMasterId(dev->name())),
       pendingCount(0), drainEvent(NULL),
-      backoffTime(0), minBackoffDelay(min_backoff),
-      maxBackoffDelay(max_backoff), inRetry(false),
-      backoffEvent(this)
+      inRetry(false)
 { }
 
 bool
 DmaPort::recvTimingResp(PacketPtr pkt)
 {
-    if (pkt->wasNacked()) {
-        DPRINTF(DMA, "Received nacked %s addr %#x\n",
-                pkt->cmdString(), pkt->getAddr());
-
-        if (backoffTime < minBackoffDelay)
-            backoffTime = minBackoffDelay;
-        else if (backoffTime < maxBackoffDelay)
-            backoffTime <<= 1;
-
-        device->reschedule(backoffEvent, curTick() + backoffTime, true);
-
-        DPRINTF(DMA, "Backoff time set to %d ticks\n", backoffTime);
-
-        pkt->reinitNacked();
-        queueDma(pkt, true);
-    } else if (pkt->senderState) {
+    if (pkt->senderState) {
         DmaReqState *state;
-        backoffTime >>= 2;
 
         DPRINTF(DMA, "Received response %s addr %#x size %#x\n",
                 pkt->cmdString(), pkt->getAddr(), pkt->req->getSize());
@@ -116,8 +98,7 @@ DmaPort::recvTimingResp(PacketPtr pkt)
 }
 
 DmaDevice::DmaDevice(const Params *p)
-    : PioDevice(p), dmaPort(this, sys, params()->min_backoff_delay,
-                            params()->max_backoff_delay)
+    : PioDevice(p), dmaPort(this, sys)
 { }
 
 void
@@ -168,16 +149,10 @@ DmaPort::recvRetry()
             inRetry = true;
             DPRINTF(DMA, "-- Failed, queued\n");
         }
-    } while (!backoffTime && result && transmitList.size());
+    } while (result && transmitList.size());
 
-    if (transmitList.size() && backoffTime && !inRetry) {
-        DPRINTF(DMA, "Scheduling backoff for %d\n", curTick()+backoffTime);
-        if (!backoffEvent.scheduled())
-            device->schedule(backoffEvent, backoffTime + curTick());
-    }
-    DPRINTF(DMA, "TransmitList: %d, backoffTime: %d inRetry: %d es: %d\n",
-            transmitList.size(), backoffTime, inRetry,
-            backoffEvent.scheduled());
+    DPRINTF(DMA, "TransmitList: %d, inRetry: %d\n",
+            transmitList.size(), inRetry);
 }
 
 void
@@ -231,8 +206,8 @@ DmaPort::sendDma()
     Enums::MemoryMode state = sys->getMemoryMode();
 
     if (state == Enums::timing) {
-        if (backoffEvent.scheduled() || inRetry) {
-            DPRINTF(DMA, "Can't send immediately, waiting for retry or backoff timer\n");
+        if (inRetry) {
+            DPRINTF(DMA, "Can't send immediately, waiting for retry\n");
             return;
         }
 
@@ -249,14 +224,7 @@ DmaPort::sendDma()
                 inRetry = true;
                 DPRINTF(DMA, "-- Failed: queued\n");
             }
-        } while (result && !backoffTime && transmitList.size());
-
-        if (transmitList.size() && backoffTime && !inRetry &&
-            !backoffEvent.scheduled()) {
-            DPRINTF(DMA, "-- Scheduling backoff timer for %d\n",
-                    backoffTime+curTick());
-            device->schedule(backoffEvent, backoffTime + curTick());
-        }
+        } while (result && transmitList.size());
 
     } else if (state == Enums::atomic) {
         transmitList.pop_front();
diff --git a/src/dev/dma_device.hh b/src/dev/dma_device.hh
index ccf388fa4..691a21749 100644
--- a/src/dev/dma_device.hh
+++ b/src/dev/dma_device.hh
@@ -87,16 +87,6 @@ class DmaPort : public MasterPort
      * here.*/
     Event *drainEvent;
 
-    /** time to wait between sending another packet, increases as NACKs are
-     * recived, decreases as responses are recived. */
-    Tick backoffTime;
-
-    /** Minimum time that device should back off for after failed sendTiming */
-    Tick minBackoffDelay;
-
-    /** Maximum time that device should back off for after failed sendTiming */
-    Tick maxBackoffDelay;
-
     /** If the port is currently waiting for a retry before it can send whatever
      * it is that it's sending. */
     bool inRetry;
@@ -108,11 +98,9 @@ class DmaPort : public MasterPort
     void queueDma(PacketPtr pkt, bool front = false);
     void sendDma();
 
-    /** event to give us a kick every time we backoff time is reached. */
-    EventWrapper<DmaPort, &DmaPort::sendDma> backoffEvent;
-
   public:
-    DmaPort(MemObject *dev, System *s, Tick min_backoff, Tick max_backoff);
+
+    DmaPort(MemObject *dev, System *s);
 
     void dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
                    uint8_t *data, Tick delay, Request::Flags flag = 0);
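
What remains of DmaPort once the backoff timer is gone is plain bookkeeping: count packets out, count responses in, and signal a pending drain when nothing is outstanding. The sketch below mirrors that logic with illustrative names only; it is not the gem5 DmaPort.

    #include <functional>

    class DmaBookkeeping
    {
        int pendingCount = 0;                  // outstanding DMA packets
        std::function<void()> drainDone;       // set while a drain is pending

      public:
        void sent() { ++pendingCount; }

        void received()
        {
            --pendingCount;
            if (pendingCount == 0 && drainDone) {
                drainDone();                   // report that the port is idle
                drainDone = nullptr;
            }
        }

        void requestDrain(std::function<void()> done)
        {
            if (pendingCount == 0)
                done();                        // already idle
            else
                drainDone = std::move(done);   // signalled from received()
        }
    };
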
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 4d8adbd90..2fdbc5c1d 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -692,8 +692,6 @@ Cache<TagStore>::atomicAccess(PacketPtr pkt)
             DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
                     bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
 
-            assert(!bus_pkt->wasNacked());
-
             // If packet was a forward, the response (if any) is already
             // in place in the bus_pkt == pkt structure, so we don't need
             // to do anything. Otherwise, use the separate bus_pkt to
@@ -823,12 +821,6 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
 
     assert(mshr);
 
-    if (pkt->wasNacked()) {
-        //pkt->reinitFromRequest();
-        warn("NACKs from devices not connected to the same bus "
-             "not implemented\n");
-        return;
-    }
     if (is_error) {
         DPRINTF(Cache, "Cache received packet with error for address %x, "
                 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
@@ -1644,12 +1636,6 @@ template<class TagStore>
 bool
 Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
 {
-    // this needs to be fixed so that the cache updates the mshr and sends the
-    // packet back out on the link, but it probably won't happen so until this
-    // gets fixed, just panic when it does
-    if (pkt->wasNacked())
-        panic("Need to implement cache resending nacked packets!\n");
-
     cache->handleResponse(pkt);
     return true;
 }
diff --git a/src/mem/packet.cc b/src/mem/packet.cc
index 69cf36a5c..dc5ff4362 100644
--- a/src/mem/packet.cc
+++ b/src/mem/packet.cc
@@ -154,8 +154,6 @@ MemCmd::commandInfo[] =
       MessageResp, "MessageReq" },
     /* IntResp -- for interrupts */
     { SET2(IsWrite, IsResponse), InvalidCmd, "MessageResp" },
-    /* NetworkNackError -- nacked at network layer (not by protocol) */
-    { SET2(IsResponse, IsError), InvalidCmd, "NetworkNackError" },
    /* InvalidDestError -- packet dest field invalid */
     { SET2(IsResponse, IsError), InvalidCmd, "InvalidDestError" },
     /* BadAddressError -- memory address invalid */
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index cdcefcadb..7396048d6 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -120,7 +120,6 @@ class MemCmd
         // @TODO these should be classified as responses rather than
         // requests; coding them as requests initially for backwards
         // compatibility
-        NetworkNackError,  // nacked at network layer (not by protocol)
         InvalidDestError,  // packet dest field invalid
         BadAddressError,   // memory address invalid
         FunctionalReadError, // unable to fulfill functional read
@@ -466,20 +465,12 @@ class Packet : public Printable
     // their encoding keeps changing (from result field to command
     // field, etc.)
     void
-    setNacked()
-    {
-        assert(isResponse());
-        cmd = MemCmd::NetworkNackError;
-    }
-
-    void
     setBadAddress()
     {
         assert(isResponse());
         cmd = MemCmd::BadAddressError;
     }
 
-    bool wasNacked() const { return cmd == MemCmd::NetworkNackError; }
     bool hadBadAddress() const { return cmd == MemCmd::BadAddressError; }
     void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; }
 
@@ -669,20 +660,6 @@ class Packet : public Printable
         }
     }
 
-    /**
-     * Take a request packet that has been returned as NACKED and
-     * modify it so that it can be sent out again. Only packets that
-     * need a response can be NACKED, so verify that that is true.
-     */
-    void
-    reinitNacked()
-    {
-        assert(wasNacked());
-        cmd = origCmd;
-        assert(needsResponse());
-        clearDest();
-    }
-
     void
     setSize(unsigned size)
     {
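
With NetworkNackError, setNacked(), wasNacked() and reinitNacked() removed, a response is either good data or carries one of the remaining explicit error commands. A receiver is left with a check along these lines; the handler itself is illustrative, but the predicates used are the ones that survive in packet.hh.

    #include <cassert>
    #include "mem/packet.hh"

    void handleTimingResp(Packet *pkt)
    {
        assert(pkt->isResponse());        // never a re-bounced request any more

        if (pkt->isError()) {
            if (pkt->hadBadAddress()) {
                // e.g. surface a BadAddressError to the original requester
            }
            // other error commands (InvalidDestError, ...) handled here
            return;
        }

        // normal completion: consume the data / update device state
    }
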