author     Andreas Hansson <andreas.hansson@arm.com>  2014-03-23 11:12:03 -0400
committer  Andreas Hansson <andreas.hansson@arm.com>  2014-03-23 11:12:03 -0400
commit     116985d661f6383bf2b61fa9b9d3df96e52fdb6d
tree       b4442e976e3ab715c51ec4560a2a72959a91c385
parent     6557741311f28f718cc33f9abde36d7e51f3585c
mem: Limit the accesses to a page before forcing a precharge
This patch adds a basic starvation-prevention mechanism where a DRAM page is forced to close after a certain number of accesses. The limit applies to the open and open-adaptive page policies and, when reached, causes the access to be issued with an auto-precharge.
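As a rough sketch of the mechanism (not the patch code itself; Bank, accessRow and the limit value below are made up for illustration), the idea is to count column accesses to the currently open row and close the row with an auto-precharge once a configurable limit is reached, so that a long stream of row hits cannot starve requests to other rows in the same bank:

#include <cstdint>
#include <iostream>

struct Bank {
    int openRow = -1;          // -1 means the bank is precharged (no open row)
    uint32_t rowAccesses = 0;  // column accesses since the row was opened
};

// Perform one access to 'row'; return true if the access triggered an
// auto-precharge because the per-row access limit was reached.
bool accessRow(Bank &bank, int row, uint32_t maxAccessesPerRow)
{
    if (bank.openRow != row) {
        // row miss: open the requested row and reset the counter
        bank.openRow = row;
        bank.rowAccesses = 0;
    }

    ++bank.rowAccesses;

    if (bank.rowAccesses == maxAccessesPerRow) {
        // limit reached: force the row closed so other rows get a turn
        bank.openRow = -1;
        bank.rowAccesses = 0;
        return true;
    }
    return false;
}

int main()
{
    Bank bank;
    const uint32_t limit = 4;  // this patch defaults max_accesses_per_row to 16
    for (int i = 1; i <= 10; ++i) {
        if (accessRow(bank, 0, limit))
            std::cout << "auto-precharge after access " << i << "\n";
    }
    return 0;
}

In the actual patch the counter lives in the per-bank state of SimpleDRAM, and with the open-adaptive policy an auto-precharge can also be issued below the limit when no further row hits are queued but a bank conflict is present, as the diff below shows.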
-rw-r--r--  src/mem/SimpleDRAM.py   |  4
-rw-r--r--  src/mem/simple_dram.cc  | 45
-rw-r--r--  src/mem/simple_dram.hh  |  9
3 files changed, 42 insertions(+), 16 deletions(-)
diff --git a/src/mem/SimpleDRAM.py b/src/mem/SimpleDRAM.py
index 1f44888e7..a72bd518c 100644
--- a/src/mem/SimpleDRAM.py
+++ b/src/mem/SimpleDRAM.py
@@ -90,6 +90,10 @@ class SimpleDRAM(AbstractMemory):
addr_mapping = Param.AddrMap('RoRaBaChCo', "Address mapping policy")
page_policy = Param.PageManage('open', "Page closure management policy")
+ # enforce a limit on the number of accesses per row
+ max_accesses_per_row = Param.Unsigned(16, "Max accesses per row before "
+ "closing");
+
# pipeline latency of the controller and PHY, split into a
# frontend part and a backend part, with reads and writes serviced
# by the queues only seeing the frontend contribution, and reads
diff --git a/src/mem/simple_dram.cc b/src/mem/simple_dram.cc
index 9a0008569..505431772 100644
--- a/src/mem/simple_dram.cc
+++ b/src/mem/simple_dram.cc
@@ -77,6 +77,7 @@ SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
tXAW(p->tXAW), activationLimit(p->activation_limit),
memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
pageMgmt(p->page_policy),
+ maxAccessesPerRow(p->max_accesses_per_row),
frontendLatency(p->static_frontend_latency),
backendLatency(p->static_backend_latency),
busBusyUntil(0), writeStartTime(0),
@@ -1067,7 +1068,6 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) {
bank.openRow = dram_pkt->row;
bank.freeAt = curTick() + addDelay + accessLat;
- bank.bytesAccessed += burstSize;
// If you activated a new row due to this access, the next access
// will have to respect tRAS for this bank.
@@ -1081,9 +1081,19 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
// we are now closing this row
bytesPerActivate.sample(bank.bytesAccessed);
bank.bytesAccessed = 0;
+ bank.rowAccesses = 0;
}
- if (pageMgmt == Enums::open_adaptive) {
+ // increment the bytes accessed and the accesses per row
+ bank.bytesAccessed += burstSize;
+ ++bank.rowAccesses;
+
+ // if we reached the max, then issue with an auto-precharge
+ bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;
+
+ // if we did not hit the limit, we might still want to
+ // auto-precharge
+ if (!auto_precharge && pageMgmt == Enums::open_adaptive) {
// a twist on the open page policy is to not blindly keep the
// page open, but close it if there are no row hits, and there
// are bank conflicts in the queue
@@ -1110,19 +1120,24 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
++p;
}
- // auto pre-charge
- if (!got_more_hits && got_bank_conflict) {
- bank.openRow = -1;
- bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
- --numBanksActive;
- if (numBanksActive == 0) {
- startTickPrechargeAll = std::max(startTickPrechargeAll,
- bank.freeAt);
- DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
- startTickPrechargeAll);
- }
- DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
+ // auto pre-charge if we have not got any more hits, and
+ // have a bank conflict
+ auto_precharge = !got_more_hits && got_bank_conflict;
+ }
+
+ // if this access should use auto-precharge, then we are
+ // closing the row
+ if (auto_precharge) {
+ bank.openRow = -1;
+ bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
+ --numBanksActive;
+ if (numBanksActive == 0) {
+ startTickPrechargeAll = std::max(startTickPrechargeAll,
+ bank.freeAt);
+ DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
+ startTickPrechargeAll);
}
+ DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
}
DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
@@ -1480,7 +1495,7 @@ SimpleDRAM::regStats()
.desc("What write queue length does an incoming req see");
bytesPerActivate
- .init(rowBufferSize)
+ .init(maxAccessesPerRow)
.name(name() + ".bytesPerActivate")
.desc("Bytes accessed per row activation")
.flags(nozero);
diff --git a/src/mem/simple_dram.hh b/src/mem/simple_dram.hh
index 8ecce94b7..7f58843aa 100644
--- a/src/mem/simple_dram.hh
+++ b/src/mem/simple_dram.hh
@@ -155,11 +155,12 @@ class SimpleDRAM : public AbstractMemory
Tick tRASDoneAt;
Tick actAllowedAt;
+ uint32_t rowAccesses;
uint32_t bytesAccessed;
Bank() :
openRow(INVALID_ROW), freeAt(0), tRASDoneAt(0), actAllowedAt(0),
- bytesAccessed(0)
+ rowAccesses(0), bytesAccessed(0)
{ }
};
@@ -508,6 +509,12 @@ class SimpleDRAM : public AbstractMemory
Enums::PageManage pageMgmt;
/**
+ * Max column accesses (read and write) per row, before forcefully
+ * closing it.
+ */
+ const uint32_t maxAccessesPerRow;
+
+ /**
* Pipeline latency of the controller frontend. The frontend
* contribution is added to writes (that complete when they are in
* the write buffer) and reads that are serviced by the write buffer.