author     Andreas Hansson <andreas.hansson@arm.com>  2013-05-30 12:54:12 -0400
committer  Andreas Hansson <andreas.hansson@arm.com>  2013-05-30 12:54:12 -0400
commit     d82bffd2979ea9dec286dca1b2d10cadc111293a (patch)
tree       b52e51f1bbd44202e6a4bd120f2bb7da3741acc6 /src
parent     7da851d1a834fbe6dd02f87884586129786b14a6 (diff)
download   gem5-d82bffd2979ea9dec286dca1b2d10cadc111293a.tar.xz
mem: Add static latency to the DRAM controller
This patch adds a frontend and backend static latency to the DRAM controller by delaying the responses. Two parameters expressing the frontend and backend contributions in absolute time are added to the controller, and the appropriate latency is added to the responses when adding them to the (infinite) queued port for sending.

For writes and reads that hit in the write buffer, only the frontend latency is added. For reads that are serviced by the DRAM, the static latency is the sum of the pipeline latencies of the entire frontend, backend and PHY. The default values are chosen based on having roughly 10 pipeline stages in total at 500 MHz.

In the future, it would be sensible to make the controller use its clock and convert these latencies (and a few of the DRAM timings) to cycles.
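As a point of reference, a minimal configuration sketch showing how the two new parameters could be overridden from a gem5 Python config script. Only the parameter names and their Param.Latency string form come from this patch; the instantiation context and the chosen values are illustrative assumptions, and the usual system wiring (address ranges, ports) is omitted:

    # Hypothetical config sketch: override the new static latencies on a
    # SimpleDRAM instance. Values here are arbitrary examples.
    from m5.objects import SimpleDRAM

    dram = SimpleDRAM()
    # Param.Latency accepts absolute-time strings
    dram.static_frontend_latency = '12ns'   # frontend pipeline contribution
    dram.static_backend_latency = '8ns'     # backend + PHY contribution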
Diffstat (limited to 'src')
-rw-r--r--  src/mem/SimpleDRAM.py    7
-rw-r--r--  src/mem/simple_dram.cc  18
-rw-r--r--  src/mem/simple_dram.hh  17
3 files changed, 33 insertions, 9 deletions
diff --git a/src/mem/SimpleDRAM.py b/src/mem/SimpleDRAM.py
index 9101de101..75c5b077b 100644
--- a/src/mem/SimpleDRAM.py
+++ b/src/mem/SimpleDRAM.py
@@ -110,6 +110,13 @@ class SimpleDRAM(AbstractMemory):
addr_mapping = Param.AddrMap('RaBaChCo', "Address mapping policy")
page_policy = Param.PageManage('open', "Page closure management policy")
+ # pipeline latency of the controller and PHY, split into a
+ # frontend part and a backend part, with reads and writes serviced
+ # by the queues only seeing the frontend contribution, and reads
+ # serviced by the memory seeing the sum of the two
+ static_frontend_latency = Param.Latency("10ns", "Static frontend latency")
+ static_backend_latency = Param.Latency("10ns", "Static backend latency")
+
# the physical organisation of the DRAM
lines_per_rowbuffer = Param.Unsigned("Row buffer size in cache lines")
ranks_per_channel = Param.Unsigned("Number of ranks per channel")
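The "10ns" defaults above follow from the sizing in the commit message, roughly 10 pipeline stages at 500 MHz; a short back-of-the-envelope sketch, where the even split between frontend and backend is an assumption consistent with the two defaults:

    # Back-of-the-envelope for the default values (assumption: the ~10
    # stages are split evenly between frontend and backend).
    clock_mhz = 500
    stages = 10
    cycle_ns = 1000.0 / clock_mhz      # 2 ns per pipeline stage at 500 MHz
    total_ns = stages * cycle_ns       # ~20 ns of static pipeline delay
    frontend_ns = backend_ns = total_ns / 2
    print(frontend_ns, backend_ns)     # 10.0 10.0, matching the defaults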
diff --git a/src/mem/simple_dram.cc b/src/mem/simple_dram.cc
index 310530a69..c54947438 100644
--- a/src/mem/simple_dram.cc
+++ b/src/mem/simple_dram.cc
@@ -66,6 +66,8 @@ SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
tXAW(p->tXAW), activationLimit(p->activation_limit),
memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
pageMgmt(p->page_policy),
+ frontendLatency(p->static_frontend_latency),
+ backendLatency(p->static_backend_latency),
busBusyUntil(0), writeStartTime(0),
prevArrival(0), numReqs(0)
{
@@ -294,7 +296,7 @@ SimpleDRAM::addToReadQueue(PacketPtr pkt)
DPRINTF(DRAM, "Read to %lld serviced by write queue\n", addr);
bytesRead += bytesPerCacheLine;
bytesConsumedRd += pkt->getSize();
- accessAndRespond(pkt);
+ accessAndRespond(pkt, frontendLatency);
return;
}
}
@@ -467,7 +469,7 @@ SimpleDRAM::addToWriteQueue(PacketPtr pkt)
bytesConsumedWr += pkt->getSize();
bytesWritten += bytesPerCacheLine;
- accessAndRespond(pkt);
+ accessAndRespond(pkt, frontendLatency);
// If your write buffer is starting to fill up, drain it!
if (writeQueue.size() > writeThreshold && !stopReads){
@@ -609,7 +611,7 @@ SimpleDRAM::recvTimingReq(PacketPtr pkt)
} else {
DPRINTF(DRAM,"Neither read nor write, ignore timing\n");
neitherReadNorWrite++;
- accessAndRespond(pkt);
+ accessAndRespond(pkt, 1);
}
retryRdReq = false;
@@ -628,7 +630,7 @@ SimpleDRAM::processRespondEvent()
// Actually responds to the requestor
bytesConsumedRd += pkt->getSize();
bytesRead += bytesPerCacheLine;
- accessAndRespond(pkt);
+ accessAndRespond(pkt, frontendLatency + backendLatency);
delete respQueue.front();
respQueue.pop_front();
@@ -737,7 +739,7 @@ SimpleDRAM::chooseNextRead()
}
void
-SimpleDRAM::accessAndRespond(PacketPtr pkt)
+SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
@@ -754,9 +756,9 @@ SimpleDRAM::accessAndRespond(PacketPtr pkt)
// @todo someone should pay for this
pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
- // queue the packet in the response queue to be sent out the
- // next tick
- port.schedTimingResp(pkt, curTick() + 1);
+ // queue the packet in the response queue to be sent out after
+ // the static latency has passed
+ port.schedTimingResp(pkt, curTick() + static_latency);
} else {
// @todo the packet is going to be deleted, and the DRAMPacket
// is still having a pointer to it
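To summarize the accessAndRespond() call sites above, a small illustrative sketch (not gem5 code) of which static latency each request class pays; the values mirror the 10 ns defaults from SimpleDRAM.py:

    # Illustrative only: static latency added per request class, mirroring
    # the call sites changed in simple_dram.cc above.
    FRONTEND_NS = 10   # static_frontend_latency default
    BACKEND_NS = 10    # static_backend_latency default

    def static_latency_ns(is_write, read_hits_write_buffer=False):
        # Writes, and reads satisfied from the write buffer, only pay the
        # frontend pipeline; reads serviced by the DRAM pay both parts.
        if is_write or read_hits_write_buffer:
            return FRONTEND_NS
        return FRONTEND_NS + BACKEND_NS

    print(static_latency_ns(is_write=True))                                # 10
    print(static_latency_ns(is_write=False, read_hits_write_buffer=True))  # 10
    print(static_latency_ns(is_write=False))                               # 20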
diff --git a/src/mem/simple_dram.hh b/src/mem/simple_dram.hh
index 6521c6768..920dcf33a 100644
--- a/src/mem/simple_dram.hh
+++ b/src/mem/simple_dram.hh
@@ -265,8 +265,9 @@ class SimpleDRAM : public AbstractMemory
* world requestor.
*
* @param pkt The packet from the outside world
+ * @param static_latency Static latency to add before sending the packet
*/
- void accessAndRespond(PacketPtr pkt);
+ void accessAndRespond(PacketPtr pkt, Tick static_latency);
/**
* Address decoder to figure out physical mapping onto ranks,
@@ -410,6 +411,20 @@ class SimpleDRAM : public AbstractMemory
Enums::PageManage pageMgmt;
/**
+ * Pipeline latency of the controller frontend. The frontend
+ * contribution is added to writes (that complete when they are in
+ * the write buffer) and reads that are serviced by the write buffer.
+ */
+ const Tick frontendLatency;
+
+ /**
+ * Pipeline latency of the backend and PHY. Along with the
+ * frontend contribution, this latency is added to reads serviced
+ * by the DRAM.
+ */
+ const Tick backendLatency;
+
+ /**
* Till when has the main data bus been spoken for already?
*/
Tick busBusyUntil;