diff options
-rw-r--r-- | src/mem/dram_ctrl.cc | 39 | ||||
-rw-r--r-- | src/mem/dram_ctrl.hh | 3 |
2 files changed, 36 insertions, 6 deletions
diff --git a/src/mem/dram_ctrl.cc b/src/mem/dram_ctrl.cc
index 971c7227d..289763218 100644
--- a/src/mem/dram_ctrl.cc
+++ b/src/mem/dram_ctrl.cc
@@ -70,7 +70,8 @@ DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
     writeBufferSize(p->write_buffer_size),
     writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
     writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
-    minWritesPerSwitch(p->min_writes_per_switch), writesThisTime(0),
+    minWritesPerSwitch(p->min_writes_per_switch),
+    writesThisTime(0), readsThisTime(0),
     tWTR(p->tWTR), tBURST(p->tBURST),
     tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS), tRFC(p->tRFC),
     tREFI(p->tREFI), tRRD(p->tRRD),
@@ -399,21 +400,26 @@ DRAMCtrl::processWriteEvent()
         writeQueue.pop_front();
         delete dram_pkt;
 
-        ++writesThisTime;
-
     DPRINTF(DRAM, "Writing, bus busy for %lld ticks, banks busy "
             "for %lld ticks\n", busBusyUntil - temp1, maxBankFreeAt() - temp2);
 
-    // If we emptied the write queue, or got below the threshold and
+    // If we emptied the write queue, or got sufficiently below the
+    // threshold (using the minWritesPerSwitch as the hysteresis) and
     // are not draining, or we have reads waiting and have done enough
     // writes, then switch to reads. The retry above could already
     // have caused it to be scheduled, so first check
     if (writeQueue.empty() ||
-        (writeQueue.size() < writeLowThreshold && !drainManager) ||
+        (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
+         !drainManager) ||
        (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
         // turn the bus back around for reads again
         busBusyUntil += tWTR;
         stopReads = false;
+
+        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
+                "waiting\n", writesThisTime, writeQueue.size());
+
+        wrPerTurnAround.sample(writesThisTime);
         writesThisTime = 0;
 
         if (!nextReqEvent.scheduled())
@@ -441,7 +447,9 @@ DRAMCtrl::processWriteEvent()
 void
 DRAMCtrl::triggerWrites()
 {
-    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
+    DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
+            "waiting\n", readsThisTime, readQueue.size());
+
     // Flag variable to stop any more read scheduling
     stopReads = true;
 
@@ -449,6 +457,11 @@ DRAMCtrl::triggerWrites()
 
     DPRINTF(DRAM, "Writes scheduled at %lld\n", write_start_time);
 
+    // there is some danger here as there might still be reads
+    // happening before the switch actually takes place
+    rdPerTurnAround.sample(readsThisTime);
+    readsThisTime = 0;
+
     assert(write_start_time >= curTick());
     assert(!writeEvent.scheduled());
     schedule(writeEvent, write_start_time);
@@ -1198,11 +1211,13 @@ DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
     // Update the access related stats
     if (dram_pkt->isRead) {
+        ++readsThisTime;
         if (rowHitFlag)
             readRowHits++;
         bytesReadDRAM += burstSize;
         perBankRdBursts[dram_pkt->bankId]++;
     } else {
+        ++writesThisTime;
         if (rowHitFlag)
             writeRowHits++;
         bytesWritten += burstSize;
@@ -1518,6 +1533,18 @@ DRAMCtrl::regStats()
         .desc("Bytes accessed per row activation")
         .flags(nozero);
 
+    rdPerTurnAround
+        .init(readBufferSize)
+        .name(name() + ".rdPerTurnAround")
+        .desc("Reads before turning the bus around for writes")
+        .flags(nozero);
+
+    wrPerTurnAround
+        .init(writeBufferSize)
+        .name(name() + ".wrPerTurnAround")
+        .desc("Writes before turning the bus around for reads")
+        .flags(nozero);
+
     bytesReadDRAM
         .name(name() + ".bytesReadDRAM")
         .desc("Total number of bytes read from DRAM");
diff --git a/src/mem/dram_ctrl.hh b/src/mem/dram_ctrl.hh
index e327f0796..749296634 100644
--- a/src/mem/dram_ctrl.hh
+++ b/src/mem/dram_ctrl.hh
@@ -488,6 +488,7 @@ class DRAMCtrl : public AbstractMemory
     const uint32_t writeLowThreshold;
     const uint32_t minWritesPerSwitch;
     uint32_t writesThisTime;
+    uint32_t readsThisTime;
 
     /**
      * Basic memory timing parameters initialized based on parameter
@@ -569,6 +570,8 @@ class DRAMCtrl : public AbstractMemory
     Stats::Vector rdQLenPdf;
     Stats::Vector wrQLenPdf;
     Stats::Histogram bytesPerActivate;
+    Stats::Histogram rdPerTurnAround;
+    Stats::Histogram wrPerTurnAround;
 
     // Latencies summed over all requests
     Stats::Scalar totQLat;