commit c60db56741631b03e3431d03c26c9114c27ba6c6
Author:    Andreas Hansson <andreas.hansson@arm.com>  2012-08-22 11:39:59 -0400
Committer: Andreas Hansson <andreas.hansson@arm.com>  2012-08-22 11:39:59 -0400
Tree:      646cb63e6a355ca2e61569c7e8ce150b7776f12e
Parent:    a6074016e211276e47238d0d708288527ace0aef
Packet: Remove NACKs from packet and its use in endpoints
This patch removes the NACK from the packet, as there is no longer any module in the system that issues them (the bridge was the only one, and the previous patch removes that). The handling of NACKs was mostly avoided throughout the code base, e.g. by using panic or assert(false), but in a few locations the NACKs were actually dealt with (although NACKs never occurred in any of the regressions).

Most notably, the DMA port will now never receive a NACK, and the backoff time is thus never changed. As a consequence, the entire backoff mechanism (similar to a PCI bus) is removed, and the DMA port relies entirely on the bus performing the arbitration and issuing a retry when appropriate. This is more in line with e.g. PCIe.

Surprisingly, this patch has no impact on any of the regressions. As mentioned in the patch that removes the NACK from the bridge, a follow-up patch should change the request and response buffer sizes for at least one regression, to also verify that the system behaves as expected when the bridge fills up.
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/o3/fetch_impl.hh    |  2
-rw-r--r--  src/cpu/o3/lsq_unit_impl.hh |  2
-rw-r--r--  src/cpu/simple/timing.cc    | 62
3 files changed, 20 insertions(+), 46 deletions(-)
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index 81d70bd61..caafa3fe3 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -363,8 +363,6 @@ DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
     DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n", tid);
 
-    assert(!pkt->wasNacked());
-
     // Only change the status if it's still waiting on the icache access
     // to return.
     if (fetchStatus[tid] != IcacheWaitResponse ||
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index a878b1540..7c98b99fb 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -95,8 +95,6 @@ LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
     //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
 
-    assert(!pkt->wasNacked());
-
     // If this is a split access, wait until all packets are received.
     if (TheISA::HasUnalignedMemAcc && !state->complete()) {
         delete pkt->req;
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 6a9fe7efc..9022845ce 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -719,25 +719,14 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
 bool
 TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
 {
-    if (!pkt->wasNacked()) {
-        DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
-        // delay processing of returned data until next CPU clock edge
-        Tick next_tick = cpu->nextCycle(curTick());
-
-        if (next_tick == curTick())
-            cpu->completeIfetch(pkt);
-        else
-            tickEvent.schedule(pkt, next_tick);
+    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
+    // delay processing of returned data until next CPU clock edge
+    Tick next_tick = cpu->nextCycle();
 
-        return true;
-    } else {
-        assert(cpu->_status == IcacheWaitResponse);
-        pkt->reinitNacked();
-        if (!sendTimingReq(pkt)) {
-            cpu->_status = IcacheRetry;
-            cpu->ifetch_pkt = pkt;
-        }
-    }
+    if (next_tick == curTick())
+        cpu->completeIfetch(pkt);
+    else
+        tickEvent.schedule(pkt, next_tick);
 
     return true;
 }
@@ -839,32 +828,21 @@ TimingSimpleCPU::completeDrain()
 bool
 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
 {
-    if (!pkt->wasNacked()) {
-        // delay processing of returned data until next CPU clock edge
-        Tick next_tick = cpu->nextCycle(curTick());
+    // delay processing of returned data until next CPU clock edge
+    Tick next_tick = cpu->nextCycle();
 
-        if (next_tick == curTick()) {
-            cpu->completeDataAccess(pkt);
+    if (next_tick == curTick()) {
+        cpu->completeDataAccess(pkt);
+    } else {
+        if (!tickEvent.scheduled()) {
+            tickEvent.schedule(pkt, next_tick);
         } else {
-            if (!tickEvent.scheduled()) {
-                tickEvent.schedule(pkt, next_tick);
-            } else {
-                // In the case of a split transaction and a cache that is
-                // faster than a CPU we could get two responses before
-                // next_tick expires
-                if (!retryEvent.scheduled())
-                    cpu->schedule(retryEvent, next_tick);
-                return false;
-            }
-        }
-
-        return true;
-    } else {
-        assert(cpu->_status == DcacheWaitResponse);
-        pkt->reinitNacked();
-        if (!sendTimingReq(pkt)) {
-            cpu->_status = DcacheRetry;
-            cpu->dcache_pkt = pkt;
+            // In the case of a split transaction and a cache that is
+            // faster than a CPU we could get two responses before
+            // next_tick expires
+            if (!retryEvent.scheduled())
+                cpu->schedule(retryEvent, next_tick);
+            return false;
         }
     }
 
     return true;
 }