/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 *          Omar Naji
 *          Wendy Elsasser
 *          Radhika Jagtap
 */

#include "mem/dram_ctrl.hh"

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "sim/system.hh"

using namespace std;
using namespace Data;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ?
                     range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L_WR(p->tCCD_L_WR),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP),
    tRAS(p->tRAS), tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC),
    tREFI(p->tREFI), tRRD(p->tRRD), tRRD_L(p->tRRD_L), tXAW(p->tXAW),
    tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit), rankToRankDly(tCS + tBURST),
    wrToRdDly(tCL + tBURST + p->tWTR), rdToWrDly(tRTW + tBURST),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0),
    lastStatsResetTick(0)
{
    // sanity check the ranks since we rely on bit slicing for the
    // address decoding
    fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
             "allowed, must be a power of two\n", ranksPerChannel);

    fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
             "must be a power of two\n", burstSize);

    readQueue.resize(p->qos_priorities);
    writeQueue.resize(p->qos_priorities);

    for (int i = 0; i < ranksPerChannel; i++) {
        Rank* rank = new Rank(*this, p, i);
        ranks.push_back(rank);
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    // determine the dram actual capacity from the DRAM config in Mbytes
    uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
        ranksPerChannel;

    // if the actual DRAM size does not match the memory capacity in the
    // system, warn!
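    // e.g. (hypothetical numbers): 8 devices per rank of 128 Mbytes each
    // and 2 ranks per channel give deviceCapacity = 128 * 8 * 2 = 2048
    // Mbytes, which should equal this channel's share of the system
    // address range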
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks ->
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal, back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tCCD_L_WR should be greater than the minimal, back-to-back
        // burst delay
        if (tCCD_L_WR <= tBURST) {
            fatal("tCCD_L_WR (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L_WR, tBURST, bankGroupsPerRank);
        }
        // tRRD_L is greater than the minimal, same bank group ACT-to-ACT
        // delay; some datasheets might specify it equal to tRRD
        if (tRRD_L < tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}

void
DRAMCtrl::init()
{
    MemCtrl::init();

    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // perform a few sanity checks on the interleaving; saved for here to
    // ensure that the system pointer is initialised
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal to or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }
}

void
DRAMCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // timestamp offset should be in clock cycles for DRAMPower
        timeStampOffset = divCeil(curTick(), tCK);

        // update the start tick for the precharge accounting to the
        // current tick
        for (auto r : ranks) {
            r->startup(curTick() + tREFI - tRP);
        }

        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + tRP + tRCD;
    }
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits, no need to remove them from addr
        row = addr % rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than the burst size, the pkt is split
    // into multiple DRAM packets
    // Note that if the pkt starting address is not aligned to the burst
    // size, the address of the first DRAM packet is kept unaligned.
    // Subsequent DRAM packets are aligned to burst size boundaries. This
    // is to ensure we accurately check read packets against packets in
    // the write queue.
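    // e.g. (hypothetical numbers): with a 64-byte burst, a 64-byte read
    // starting at address 0x70 becomes two DRAM packets, one for
    // [0x70, 0x7F] (16 bytes, unaligned start) and one for [0x80, 0xAF]
    // (48 bytes, aligned to the next burst boundary)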
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;
        masterReadAccesses[pkt->masterId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr);
        // if the burst address is not present then there is no need
        // to look any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(DRAM,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        bytesReadWrQ += burstSize;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue[dram_pkt->qosValue()].push_back(dram_pkt);

            ++dram_pkt->rankRef.readEntries;

            // log packet
            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            // Update stats
            avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the
    // response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than the burst size, the pkt is split
    // into multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;
        masterWriteAccesses[pkt->masterId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(totalWriteQueueSize < writeBufferSize);
            wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
            isInWriteQueue.insert(burstAlign(addr));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
                       dram_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            avgWrQLen = totalWriteQueueSize;

            // increment write entries of the rank
            ++dram_pkt->rankRef.writeEntries;
        } else {
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(DRAM, "Response %lu\n", packet->addr);
    }

    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(DRAM, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burstSize, pkt);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else {
        assert(pkt->isWrite());
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    // if a read has reached its ready-time, decrement the number of reads
    // At this point the packet has been handled and there is a possibility
    // to switch to low-power mode if no other packet is available
    --dram_pkt->rankRef.readEntries;
    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
            dram_pkt->rank, dram_pkt->rankRef.readEntries);

    // counter should at least indicate one outstanding request
    // for this read
    assert(dram_pkt->rankRef.outstandingEvents > 0);
    // read response received, decrement count
    --dram_pkt->rankRef.outstandingEvents;

    // at this moment should not have transitioned to a low-power state
    assert((dram_pkt->rankRef.pwrState != PWR_SREF) &&
           (dram_pkt->rankRef.pwrState != PWR_PRE_PDN) &&
           (dram_pkt->rankRef.pwrState != PWR_ACT_PDN));

    // track if this is the last packet before idling
    // and that there are no outstanding commands to this rank
    if (dram_pkt->rankRef.isQueueEmpty() &&
        dram_pkt->rankRef.outstandingEvents == 0) {
        // verify that there are no events scheduled
        assert(!dram_pkt->rankRef.activateEvent.scheduled());
        assert(!dram_pkt->rankRef.prechargeEvent.scheduled());

        // if coming from active state, schedule power event to
        // active power-down else go to precharge power-down
        DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
                "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);

        // default to ACT power-down unless already in IDLE state
        // could be in IDLE if PRE issued before data returned
        PowerState next_pwr_state = PWR_ACT_PDN;
        if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
            next_pwr_state = PWR_PRE_PDN;
        }

        dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
    }

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
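        // responses are pushed in issue order and readyTime is
        // non-decreasing across the queue, so the new front entry is the
        // next one due (the asserts below check this invariant)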
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allRanksDrained()) {

            DPRINTF(Drain, "DRAM controller done draining\n");
            signalDrainDone();
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to the refresh-idle state
            DRAMPacket* dram_pkt = *(queue.begin());
            if (ranks[dram_pkt->rank]->inRefIdleState()) {
                ret = queue.begin();
                DPRINTF(DRAM, "Single request, going to a free rank\n");
            } else {
                DPRINTF(DRAM, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                DRAMPacket* dram_pkt = *i;
                if (ranks[dram_pkt->rank]->inRefIdleState()) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
{
    // Only determine this if needed
    vector<uint32_t> earliest_banks(ranksPerChannel, 0);

    // Has minBankPrep been called to populate earliest_banks?
    bool filled_earliest_banks = false;
    // can the PRE/ACT sequence be done without impacting utilization?
    bool hidden_bank_prep = false;

    // search for seamless row hits first, if no seamless row hit is
    // found then determine if there are other packets that can be issued
    // without incurring additional bus delay due to bank timing
    // Will select closed rows first to enable more open row possibilities
    // in future selections
    bool found_hidden_bank = false;

    // remember if we found a row hit, not seamless, but bank prepped
    // and ready
    bool found_prepped_pkt = false;

    // if we have no row hit, prepped or not, and no seamless packet,
    // just go for the earliest possible
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    for (auto i = queue.begin(); i != queue.end() ; ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        const Tick col_allowed_at = dram_pkt->isRead() ?
                                    bank.rdAllowedAt : bank.wrAllowedAt;

        DPRINTF(DRAM, "%s checking packet in bank %d\n",
                __func__, dram_pkt->bankRef.bank);

        // check if rank is not doing a refresh and thus is available, if not,
        // jump to the next packet
        if (dram_pkt->rankRef.inRefIdleState()) {

            DPRINTF(DRAM,
                    "%s bank %d - Rank %d available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);

            // check if it is a row hit
            if (bank.openRow == dram_pkt->row) {
                // no additional rank-to-rank or same bank-group
                // delays, or we switched read/write and might as well
                // go for the row hit
                if (col_allowed_at <= min_col_at) {
                    // FCFS within the hits, giving priority to
                    // commands that can issue seamlessly, without
                    // additional delay, such as same rank accesses
                    // and/or different bank-group accesses
                    DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);
                    selected_pkt_it = i;
                    // no need to look through the remaining queue entries
                    break;
                } else if (!found_hidden_bank && !found_prepped_pkt) {
                    // if we did not find a packet to a closed row that can
                    // issue the bank commands without incurring delay, and
                    // did not yet find a packet to a prepped row, remember
                    // the current one
                    selected_pkt_it = i;
                    found_prepped_pkt = true;
                    DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
                }
            } else if (!found_earliest_pkt) {
                // if we have not initialised the bank status, do it
                // now, and only once per scheduling decision
                if (!filled_earliest_banks) {
                    // determine entries with earliest bank delay
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                }

                // bank is amongst first available banks
                // minBankPrep will give priority to packets that can
                // issue seamlessly
                if (bits(earliest_banks[dram_pkt->rank],
                         dram_pkt->bank, dram_pkt->bank)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    // give priority to packets that can issue
                    // bank commands 'behind the scenes'
                    // any additional delay if any will be due to
                    // col-to-col command requirements
                    if (hidden_bank_prep || !found_prepped_pkt)
                        selected_pkt_it = i;
                }
            }
        } else {
            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
                    dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);
        }
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);
    }

    return selected_pkt_it;
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time, true);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)
{
    assert(rank_ref.actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank_ref.openRow == Bank::NO_ROW);
    bank_ref.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank_ref.bytesAccessed = 0;
    bank_ref.rowAccesses = 0;

    ++rank_ref.numBanksActive;
    assert(rank_ref.numBanksActive <= banksPerRank);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank_ref.bank, rank_ref.rank, act_tick,
            ranks[rank_ref.rank]->numBanksActive);

    rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
                               act_tick));

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
            timeStampOffset, bank_ref.bank, rank_ref.rank);

    // The next access has to respect tRAS for this bank
    bank_ref.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay for both read and write cmds
    bank_ref.rdAllowedAt = std::max(act_tick + tRCD, bank_ref.rdAllowedAt);
    bank_ref.wrAllowedAt = std::max(act_tick + tRCD, bank_ref.wrAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group. Use tRRD_L
            // in this case
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            // use the shorter tRRD value when either
            // 1) the bank group architecture is not supported
            // 2) the bank is in a different bank group
            rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we directly schedule an activate power event
    if (!rank_ref.actTicks.empty()) {
        // sanity check
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit,
                  act_tick - rank_ref.actTicks.back(), act_tick,
                  rank_ref.actTicks.back(), tXAW);
        }

        // shift the times used for the book keeping, the last element
        // (highest index) is the oldest one and hence the lowest value
        rank_ref.actTicks.pop_back();

        // record a new activation (in the future)
        rank_ref.actTicks.push_front(act_tick);

        // cannot activate more than X times in time window tXAW, push the
        // next one (the X + 1'st activate) to be tXAW away from the
        // oldest in our window of X
        if (rank_ref.actTicks.back() &&
            (act_tick - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,
                    rank_ref.actTicks.back() + tXAW);
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
        }
    }

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!rank_ref.activateEvent.scheduled())
        schedule(rank_ref.activateEvent, act_tick);
    else if (rank_ref.activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(rank_ref.activateEvent, act_tick);
}

void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(rank_ref.numBanksActive != 0);
    --rank_ref.numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            rank_ref.numBanksActive);

    if (trace) {
        rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
                                   pre_at));

        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
                timeStampOffset, bank.bank, rank_ref.rank);
    }
    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!rank_ref.prechargeEvent.scheduled()) {
        schedule(rank_ref.prechargeEvent, pre_done_at);
        // New event, increment count
        ++rank_ref.outstandingEvents;
    } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
        reschedule(rank_ref.prechargeEvent, pre_done_at);
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the rank
    Rank& rank = dram_pkt->rankRef;

    // are we in or transitioning to a low-power state and have not scheduled
    // a power-up event?
    // if so, wake up from power down to issue RD/WR burst
    if (rank.inLowPowerState) {
        assert(rank.pwrState != PWR_SREF);
        rank.scheduleWakeUpEvent(tXP);
    }

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(rank, bank, act_tick, dram_pkt->row);
    }

    // respect any constraints on the command (e.g. tRCD or tCCD)
    const Tick col_allowed_at = dram_pkt->isRead() ?
                                bank.rdAllowedAt : bank.wrAllowedAt;

    // we need to wait until the bus is available before we can issue
    // the command; need a minimum of tBURST between commands
    Tick cmd_at = std::max({col_allowed_at, nextBurstAt, curTick()});

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L/tCCD_L_WR here)
    Tick dly_to_rd_cmd;
    Tick dly_to_wr_cmd;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L. Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch &&
                   (bank.bankgr == ranks[j]->banks[i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // tCCD_L is the default requirement for same BG timing
                    // tCCD_L_WR is required for write-to-write
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ?
                                    tCCD_L : std::max(tCCD_L, wrToRdDly);
                    dly_to_wr_cmd = dram_pkt->isRead() ?
                                    std::max(tCCD_L, rdToWrDly) : tCCD_L_WR;
                } else {
                    // tBURST is the default requirement for diff BG timing
                    // Need to also take bus turnaround delays into account
                    dly_to_rd_cmd = dram_pkt->isRead() ? tBURST : wrToRdDly;
                    dly_to_wr_cmd = dram_pkt->isRead() ? rdToWrDly : tBURST;
                }
            } else {
                // different rank is by default in a different bank group and
                // doesn't require longer tCCD or additional RTW, WTR delays
                // Need to account for rank-to-rank switching with tCS
                dly_to_wr_cmd = rankToRankDly;
                dly_to_rd_cmd = rankToRankDly;
            }
            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead() ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        //    page open, but closes it if there are no row hits, and there
        //    are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        //    page, but closes it only if there are no row hits in the queue.
        //    In this case, only force an auto precharge when there
        //    are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const std::vector<DRAMPacketQueue>& queue =
            dram_pkt->isRead() ? readQueue : writeQueue;

        for (uint8_t i = 0; i < numPriorities(); ++i) {
            auto p = queue[i].begin();
            // keep on looking until we find a hit or reach the end of the
            // queue
            // 1) if a hit is found, then both open and close adaptive
            //    policies keep the page open
            // 2) if no hit is found, got_bank_conflict is set to true if a
            //    bank conflict request is waiting in the queue
            // 3) make sure we are not considering the packet that we are
            //    currently dealing with
            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                          (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;
                }
                ++p;
            }

            if (got_more_hits)
                break;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        //    have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";

    // MemCommand required for DRAMPower library
    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
                                                   MemCommand::WR;

    // Update bus state to reflect when previous command was issued
    nextBurstAt = cmd_at + tBURST;

    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, nextBurstAt);

    dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
                                        cmd_at));

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);

    // if this access should use auto-precharge, then we are
    // closing the row after the read/write burst
    if (auto_precharge) {
        // if auto-precharge push a PRE command at the correct tick to the
        // list used by DRAMPower library to calculate power
        prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
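    // Intuition (informal): waking up tRP + tRCD before the bus frees up
    // gives a row-buffer miss just enough time to issue its precharge and
    // activate so that the column command can follow seamlessly; a row hit
    // scheduled this early simply waits on its allowed-at time.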
    nextReqTime = nextBurstAt - (tRP + tRCD);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead()) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        masterReadTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;

        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
        masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
        masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
        masterWriteTotalLat[dram_pkt->masterId()] +=
            dram_pkt->readyTime - dram_pkt->entryTime;
    }
}

void
DRAMCtrl::processNextReqEvent()
{
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
            (busState == MemCtrl::READ) ? "READ" : "WRITE",
            switched_cmd_type ? "[turnaround triggered]" : "");

    if (switched_cmd_type) {
        if (busState == READ) {
            DPRINTF(DRAM,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(DRAM,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    int busyRanks = 0;
    for (auto r : ranks) {
        if (!r->inRefIdleState()) {
            if (r->pwrState != PWR_SREF) {
                // rank is busy refreshing
                DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
                busyRanks++;

                // let the rank know that if it was waiting to drain, it
                // is now done and ready to proceed
                r->checkDrainDone();
            }

            // check if we were in self-refresh and haven't started
            // to transition out
            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
                // if we have commands queued to this rank and we don't have
                // a minimum number of active commands enqueued,
                // exit self-refresh
                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);
                    // wake up from self-refresh
                    r->scheduleWakeUpEvent(tXS);
                    // things are brought back into action once a refresh is
                    // performed after self-refresh
                    // continue with selection for other ranks
                }
            }
        }
    }

    if (busyRanks == ranksPerChannel) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {
        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(DRAM,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allRanksDrained()) {

                    DPRINTF(Drain, "DRAM controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            DRAMPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "DRAM controller checking READ queue [%d] "
                        "priority [%d elements]\n", prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay, which will be tCS (different rank)
                // in this case
                to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(DRAM, "No Reads Found - exiting\n");
                return;
            }

            auto dram_pkt = *to_read;

            assert(dram_pkt->rankRef.inRefIdleState());

            doDRAMAccess(dram_pkt);

            // Every respQueue entry will generate an event, increment count
            ++dram_pkt->rankRef.outstandingEvents;
            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->masterId(),
                        dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                        dram_pkt->readyTime - dram_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requester at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (totalWriteQueueSize > writeHighThreshold) {
                switch_to_writes = true;
            }

            // remove the request from the queue - the iterator is no
            // longer valid
            readQueue[dram_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        DRAMPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "DRAM controller checking WRITE queue [%d] "
                    "priority [%d elements]\n", prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ? std::min(tRTW, tCS) : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if no writes to a rank that is available to service requests
        // (i.e. a rank in refresh-idle state) are found, then return.
        // There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point
        // and wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(DRAM, "No Writes Found - exiting\n");
            return;
        }

        auto dram_pkt = *to_write;

        assert(dram_pkt->rankRef.inRefIdleState());
        // sanity check
        assert(dram_pkt->size <= burstSize);

        doDRAMAccess(dram_pkt);

        // removed write from queue, decrement count
        --dram_pkt->rankRef.writeEntries;

        // Schedule write done event to decrement event count
        // after the readyTime has been reached
        // Only schedule the latest write event to minimize the events
        // required; only need to ensure that the final event scheduled
        // covers the time that writes are outstanding and the bus is
        // active, to hold off power-down entry events
        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
            // New event, increment count
            ++dram_pkt->rankRef.outstandingEvents;
        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {
            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
        }

        isInWriteQueue.erase(burstAlign(dram_pkt->addr));

        // log the response
        logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

        // remove the request from the queue - the iterator is no
        // longer valid
        writeQueue[dram_pkt->qosValue()].erase(to_write);

        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch)) {

            // turn the bus back around for reads again
            busStateNext = READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}

pair<vector<uint32_t>, bool>
DRAMCtrl::minBankPrep(const DRAMPacketQueue& queue,
                      Tick min_col_at) const
{
    Tick min_act_at = MaxTick;
    vector<uint32_t> bank_mask(ranksPerChannel, 0);

    // latest Tick for which ACT can occur without incurring additional
    // delay on the data bus
    const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());

    // Flag condition when burst can issue back-to-back with previous burst
    bool found_seamless_bank = false;

    // Flag condition when bank can be opened without incurring additional
    // delay on the data bus
    bool hidden_bank_prep = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (const auto& p : queue) {
        if (p->rankRef.inRefIdleState())
            got_waiting[p->bankId] = true;
    }

    // Find command with optimal bank timing
    // Will prioritize commands that can issue seamlessly.
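    // Informally: a closed bank can activate at actAllowedAt, while a bank
    // with an open row must precharge first, adding tRP. A bank is
    // "seamless" if its column command can issue by min_col_at, and its
    // ACT is "hidden" if it can issue by min_col_at - tRCD.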
for (int i = 0; i < ranksPerChannel; i++) { for (int j = 0; j < banksPerRank; j++) { uint16_t bank_id = i * banksPerRank + j; // if we have waiting requests for the bank, and it is // amongst the first available, update the mask if (got_waiting[bank_id]) { // make sure this rank is not currently refreshing. assert(ranks[i]->inRefIdleState()); // simplistic approximation of when the bank can issue // an activate, ignoring any rank-to-rank switching // cost in this calculation Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ? std::max(ranks[i]->banks[j].actAllowedAt, curTick()) : std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP; // When is the earliest the R/W burst can issue? const Tick col_allowed_at = (busState == READ) ? ranks[i]->banks[j].rdAllowedAt : ranks[i]->banks[j].wrAllowedAt; Tick col_at = std::max(col_allowed_at, act_at + tRCD); // bank can issue burst back-to-back (seamlessly) with // previous burst bool new_seamless_bank = col_at <= min_col_at; // if we found a new seamless bank or we have no // seamless banks, and got a bank with an earlier // activate time, it should be added to the bit mask if (new_seamless_bank || (!found_seamless_bank && act_at <= min_act_at)) { // if we did not have a seamless bank before, and // we do now, reset the bank mask, also reset it // if we have not yet found a seamless bank and // the activate time is smaller than what we have // seen so far if (!found_seamless_bank && (new_seamless_bank || act_at < min_act_at)) { std::fill(bank_mask.begin(), bank_mask.end(), 0); } found_seamless_bank |= new_seamless_bank; // ACT can occur 'behind the scenes' hidden_bank_prep = act_at <= hidden_act_max; // set the bit corresponding to the available bank replaceBits(bank_mask[i], j, j, 1); min_act_at = act_at; } } } } return make_pair(bank_mask, hidden_bank_prep); } DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p, int rank) : EventManager(&_memory), memory(_memory), pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE), pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE), refreshState(REF_IDLE), inLowPowerState(false), rank(rank), readEntries(0), writeEntries(0), outstandingEvents(0), wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank), numBanksActive(0), actTicks(_p->activation_limit, 0), writeDoneEvent([this]{ processWriteDoneEvent(); }, name()), activateEvent([this]{ processActivateEvent(); }, name()), prechargeEvent([this]{ processPrechargeEvent(); }, name()), refreshEvent([this]{ processRefreshEvent(); }, name()), powerEvent([this]{ processPowerEvent(); }, name()), wakeUpEvent([this]{ processWakeUpEvent(); }, name()) { for (int b = 0; b < _p->banks_per_rank; b++) { banks[b].bank = b; // GDDR addressing of banks to BG is linear. // Here we assume that all DRAM generations address bank groups as // follows: if (_p->bank_groups_per_rank > 0) { // Simply assign lower bits to bank group in order to // rotate across bank groups as banks are incremented // e.g. 
with 4 banks per bank group and 16 banks total: // banks 0,4,8,12 are in bank group 0 // banks 1,5,9,13 are in bank group 1 // banks 2,6,10,14 are in bank group 2 // banks 3,7,11,15 are in bank group 3 banks[b].bankgr = b % _p->bank_groups_per_rank; } else { // No bank groups; simply assign to bank number banks[b].bankgr = b; } } } void DRAMCtrl::Rank::startup(Tick ref_tick) { assert(ref_tick > curTick()); pwrStateTick = curTick(); // kick off the refresh, and give ourselves enough time to // precharge schedule(refreshEvent, ref_tick); } void DRAMCtrl::Rank::suspend() { deschedule(refreshEvent); // Update the stats updatePowerStats(); // don't automatically transition back to LP state after next REF pwrStatePostRefresh = PWR_IDLE; } bool DRAMCtrl::Rank::isQueueEmpty() const { // check commmands in Q based on current bus direction bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0)) || ((memory.busStateNext == WRITE) && (writeEntries == 0)); return no_queued_cmds; } void DRAMCtrl::Rank::checkDrainDone() { // if this rank was waiting to drain it is now able to proceed to // precharge if (refreshState == REF_DRAIN) { DPRINTF(DRAM, "Refresh drain done, now precharging\n"); refreshState = REF_PD_EXIT; // hand control back to the refresh event loop schedule(refreshEvent, curTick()); } } void DRAMCtrl::Rank::flushCmdList() { // at the moment sort the list of commands and update the counters // for DRAMPower libray when doing a refresh sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime); auto next_iter = cmdList.begin(); // push to commands to DRAMPower for ( ; next_iter != cmdList.end() ; ++next_iter) { Command cmd = *next_iter; if (cmd.timeStamp <= curTick()) { // Move all commands at or before curTick to DRAMPower power.powerlib.doCommand(cmd.type, cmd.bank, divCeil(cmd.timeStamp, memory.tCK) - memory.timeStampOffset); } else { // done - found all commands at or before curTick() // next_iter references the 1st command after curTick break; } } // reset cmdList to only contain commands after curTick // if there are no commands after curTick, updated cmdList will be empty // in this case, next_iter is cmdList.end() cmdList.assign(next_iter, cmdList.end()); } void DRAMCtrl::Rank::processActivateEvent() { // we should transition to the active state as soon as any bank is active if (pwrState != PWR_ACT) // note that at this point numBanksActive could be back at // zero again due to a precharge scheduled in the future schedulePowerEvent(PWR_ACT, curTick()); } void DRAMCtrl::Rank::processPrechargeEvent() { // counter should at least indicate one outstanding request // for this precharge assert(outstandingEvents > 0); // precharge complete, decrement count --outstandingEvents; // if we reached zero, then special conditions apply as we track // if all banks are precharged for the power models if (numBanksActive == 0) { // no reads to this rank in the Q and no pending // RD/WR or refresh commands if (isQueueEmpty() && outstandingEvents == 0) { // should still be in ACT state since bank still open assert(pwrState == PWR_ACT); // All banks closed - switch to precharge power down state. 
DPRINTF(DRAMState, "Rank %d sleep at tick %d\n", rank, curTick()); powerDownSleep(PWR_PRE_PDN, curTick()); } else { // we should transition to the idle state when the last bank // is precharged schedulePowerEvent(PWR_IDLE, curTick()); } } } void DRAMCtrl::Rank::processWriteDoneEvent() { // counter should at least indicate one outstanding request // for this write assert(outstandingEvents > 0); // Write transfer on bus has completed // decrement per rank counter --outstandingEvents; } void DRAMCtrl::Rank::processRefreshEvent() { // when first preparing the refresh, remember when it was due if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) { // remember when the refresh is due refreshDueAt = curTick(); // proceed to drain refreshState = REF_DRAIN; // make nonzero while refresh is pending to ensure // power down and self-refresh are not entered ++outstandingEvents; DPRINTF(DRAM, "Refresh due\n"); } // let any scheduled read or write to the same rank go ahead, // after which it will // hand control back to this event loop if (refreshState == REF_DRAIN) { // if a request is at the moment being handled and this request is // accessing the current rank then wait for it to finish if ((rank == memory.activeRank) && (memory.nextReqEvent.scheduled())) { // hand control over to the request loop until it is // evaluated next DPRINTF(DRAM, "Refresh awaiting draining\n"); return; } else { refreshState = REF_PD_EXIT; } } // at this point, ensure that rank is not in a power-down state if (refreshState == REF_PD_EXIT) { // if rank was sleeping and we have't started exit process, // wake-up for refresh if (inLowPowerState) { DPRINTF(DRAM, "Wake Up for refresh\n"); // save state and return after refresh completes scheduleWakeUpEvent(memory.tXP); return; } else { refreshState = REF_PRE; } } // at this point, ensure that all banks are precharged if (refreshState == REF_PRE) { // precharge any active bank if (numBanksActive != 0) { // at the moment, we use a precharge all even if there is // only a single bank open DPRINTF(DRAM, "Precharging all\n"); // first determine when we can precharge Tick pre_at = curTick(); for (auto &b : banks) { // respect both causality and any existing bank // constraints, some banks could already have a // (auto) precharge scheduled pre_at = std::max(b.preAllowedAt, pre_at); } // make sure all banks per rank are precharged, and for those that // already are, update their availability Tick act_allowed_at = pre_at + memory.tRP; for (auto &b : banks) { if (b.openRow != Bank::NO_ROW) { memory.prechargeBank(*this, b, pre_at, false); } else { b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at); b.preAllowedAt = std::max(b.preAllowedAt, pre_at); } } // precharge all banks in rank cmdList.push_back(Command(MemCommand::PREA, 0, pre_at)); DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", divCeil(pre_at, memory.tCK) - memory.timeStampOffset, rank); } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) { // Banks are closed, have transitioned to IDLE state, and // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled DPRINTF(DRAM, "All banks already precharged, starting refresh\n"); // go ahead and kick the power state machine into gear since // we are already idle schedulePowerEvent(PWR_REF, curTick()); } else { // banks state is closed but haven't transitioned pwrState to IDLE // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled // should have outstanding precharge event in this case assert(prechargeEvent.scheduled()); // will start refresh when pwrState 

        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh, at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_START) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + memory.tRFC;

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;
        }

        // at the moment this affects all ranks
        cmdList.push_back(Command(MemCommand::REF, 0, curTick()));

        // Update the stats
        updatePowerStats();

        DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
                memory.timeStampOffset, rank);

        // Update for next refresh
        refreshDueAt += memory.tREFI;

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // Run the refresh and schedule event to transition power states
        // when refresh completes
        refreshState = REF_RUN;
        schedule(refreshEvent, ref_done_at);
        return;
    }

    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        assert(!powerEvent.scheduled());

        if ((memory.drainState() == DrainState::Draining) ||
            (memory.drainState() == DrainState::Drained)) {
            // if draining, do not re-enter low-power mode.
            // simply go to IDLE and wait
            schedulePowerEvent(PWR_IDLE, curTick());
        } else {
            // At the moment, we sleep when the refresh ends and wait to be
            // woken up again if previously in a low-power state.
            if (pwrStatePostRefresh != PWR_IDLE) {
                // power state should be PWR_REF at this point
                assert(pwrState == PWR_REF);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                        "power state %d before refreshing\n", rank,
                        pwrStatePostRefresh);
                powerDownSleep(pwrState, curTick());

            // Force PRE power-down if there are no outstanding commands
            // in Q after refresh.
            } else if (isQueueEmpty()) {
                // still have refresh event outstanding but there should
                // be no other events outstanding
                assert(outstandingEvents == 1);
                DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                        " in a low power state before refreshing\n", rank);
                powerDownSleep(PWR_PRE_PDN, curTick());

            } else {
                // move to the idle power state once the refresh is done, this
                // will also move the refresh state machine to the refresh
                // idle state
                schedulePowerEvent(PWR_IDLE, curTick());
            }
        }

        // At this point, we have completed the current refresh.
        // In the SREF bypass case, we do not get to this state in the
        // refresh state machine and therefore can always schedule the
        // next event.
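        // A worked example of the scheduling below (illustrative numbers
        // only): with tREFI = 7.8us and tRP = 15ns, a refresh that became
        // due at 100us has refreshDueAt advanced to 107.8us, and the next
        // refresh event fires at 107.8us - 15ns so that the preceding
        // precharge can complete by the time the refresh is due.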
        // Compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt - memory.tRP);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                " at %llu\n", curTick(), refreshDueAt);
    }
}

void
DRAMCtrl::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}

void
DRAMCtrl::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
{
    // if the requested low power state is active power-down, schedule
    // the transition to it. In reality tCKE is needed to enter active
    // power-down; this is neglected here and could be added in the
    // future.
    if (pwr_state == PWR_ACT_PDN) {
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_PRE_PDN) {
        // if the requested low power state is precharge power-down,
        // schedule the transition to it. In reality tCKE is needed to
        // enter precharge power-down; this is neglected here.
        schedulePowerEvent(pwr_state, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        // if a refresh just occurred
        // transition to PRE_PDN now that all banks are closed
        // precharge power down requires tCKE to enter. For simplicity
        // this is not considered.
        schedulePowerEvent(PWR_PRE_PDN, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
        DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwr_state == PWR_SREF) {
        // should only enter SREF after PRE-PD wakeup to do a refresh
        assert(pwrStatePostRefresh == PWR_PRE_PDN);
        // self refresh requires time tCKESR to enter. For simplicity,
        // this is not considered.
        schedulePowerEvent(PWR_SREF, tick);
        // push command to DRAMPower
        cmdList.push_back(Command(MemCommand::SREN, 0, tick));
        DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
    // Ensure that we don't power-down and back up in the same tick.
    // Once we commit to power-down entry, do it and wait for at least
    // one tCK. This could be replaced with tCKE if/when that is added
    // to the model.
    wakeUpAllowedAt = tick + memory.tCK;

    // Transitioning to a low power state, set flag
    inLowPowerState = true;
}

void
DRAMCtrl::Rank::scheduleWakeUpEvent(Tick exit_delay)
{
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    // if waking for refresh, hold previous state
    // else reset state back to IDLE
    if (refreshState == REF_PD_EXIT) {
        pwrStatePostRefresh = pwrState;
    } else {
        // don't automatically transition back to LP state after next REF
        pwrStatePostRefresh = PWR_IDLE;
    }

    // schedule wake-up with event to ensure entry has completed before
    // we try to wake-up
    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        // respect both causality and any existing bank
        // constraints, some banks could already have a
        // (auto) precharge scheduled
        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
    }
    // Transitioning out of low power state, clear flag
    inLowPowerState = false;

    // push to DRAMPower
    // use pwrStateTrans for cases where we have a power event scheduled
    // to enter low power that has not yet been processed
    if (pwrStateTrans == PWR_ACT_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);

    } else if (pwrStateTrans == PWR_PRE_PDN) {
        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
        DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
                memory.tCK) - memory.timeStampOffset, rank);
    }
}

void
DRAMCtrl::Rank::processWakeUpEvent()
{
    // Should be in a power-down or self-refresh state
    assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
           (pwrState == PWR_SREF));

    // Check current state to determine transition state
    if (pwrState == PWR_ACT_PDN) {
        // banks still open, transition to PWR_ACT
        schedulePowerEvent(PWR_ACT, curTick());
    } else {
        // transitioning from a precharge power-down or self-refresh state
        // banks are closed - transition to PWR_IDLE
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}

void
DRAMCtrl::Rank::processPowerEvent()
{
    assert(curTick() >= pwrStateTick);
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    // track the total idle time
    if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
        (prev_state == PWR_SREF)) {
        totalIdleTime += duration;
    }

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    // if rank was refreshing, make sure to start scheduling requests again
    if (prev_state == PWR_REF) {
        // bus IDLED prior to REF
        // counter should be one for the refresh command only
        assert(outstandingEvents == 1);
        // REF complete, decrement count and go back to IDLE
        --outstandingEvents;
        refreshState = REF_IDLE;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
        // if moving back to power-down after refresh
        if (pwrState != PWR_IDLE) {
            assert(pwrState == PWR_PRE_PDN);
            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at tick %llu\n", rank, curTick());
        }

        // completed refresh event, ensure next request is scheduled
        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing"
                    " rank %d\n", rank);
            schedule(memory.nextReqEvent, curTick());
        }
    }

    if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) {
        // have exited ACT PD
        assert(prev_state == PWR_ACT_PDN);

        // go back to REF event and close banks
        refreshState = REF_PRE;
        schedule(refreshEvent, curTick());
    } else if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");
        if (prev_state == PWR_SREF) {
            // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState
            // continues to return false during tXS after SREF exit
            // Schedule a refresh which kicks things back into action
            // when it finishes
            refreshState = REF_SREF_EXIT;
            schedule(refreshEvent, curTick() + memory.tXS);
        } else {
            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to, or schedule refresh
            if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
                // ensure refresh is restarted only after final PRE command.
                // do not restart refresh if controller is in an intermediate
                // state, after PRE_PDN exit, when banks are IDLE but an
                // ACT is scheduled.
                if (!activateEvent.scheduled()) {
                    // there should be nothing waiting at this point
                    assert(!powerEvent.scheduled());
                    if (refreshState == REF_PD_EXIT) {
                        // exiting PRE PD, will be in IDLE until tXP expires
                        // and then should transition to PWR_REF state
                        assert(prev_state == PWR_PRE_PDN);
                        schedulePowerEvent(PWR_REF, curTick() + memory.tXP);
                    } else if (refreshState == REF_PRE) {
                        // can directly move to PWR_REF state and proceed below
                        pwrState = PWR_REF;
                    }
                } else {
                    // must have PRE scheduled to transition back to IDLE
                    // and re-kick off refresh
                    assert(prechargeEvent.scheduled());
                }
            }
        }
    }

    // transition to the refresh state and re-start refresh process
    // refresh state machine will schedule the next power state transition
    if (pwrState == PWR_REF) {
        // completed final PRE for refresh or exiting power-down
        assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);

        // exited PRE PD for refresh, with no pending commands
        // bypass auto-refresh and go straight to SREF, where memory
        // will issue refresh immediately upon entry
        if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
            (memory.drainState() != DrainState::Draining) &&
            (memory.drainState() != DrainState::Drained)) {
            DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                    "to self refresh at %llu tick\n", rank, curTick());
            powerDownSleep(PWR_SREF, curTick());

            // Since refresh was bypassed, remove event by decrementing count
            assert(outstandingEvents == 1);
            --outstandingEvents;

            // reset state back to IDLE temporarily until SREF is entered
            pwrState = PWR_IDLE;

        // Not bypassing refresh for SREF entry
        } else {
            DPRINTF(DRAMState, "Refreshing\n");

            // there should be nothing waiting at this point
            assert(!powerEvent.scheduled());

            // kick the refresh event loop into action again, and that
            // in turn will schedule a transition to the idle power
            // state once the refresh is done
            schedule(refreshEvent, curTick());
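            // (The SREF bypass above relies on self-refresh internally
            // refreshing the array: when the rank woke from PRE_PDN only
            // to refresh and has no queued work, entering SREF directly
            // is treated as equivalent to issuing the auto-refresh first.)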
            // Banks transitioned to IDLE, start REF
            refreshState = REF_START;
        }
    }
}

void
DRAMCtrl::Rank::updatePowerStats()
{
    // All commands up to refresh have completed
    // flush cmdList to DRAMPower
    flushCmdList();

    // Call the function that calculates window energy at intermediate update
    // events like at refresh, stats dump as well as at simulation exit.
    // The window starts at the last time the calcWindowEnergy function was
    // called and runs up to the current time.
    power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
                                    memory.timeStampOffset);

    // Get the energy from DRAMPower
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    // The energy components inside the power lib are calculated over
    // the window so accumulate into the corresponding gem5 stat
    actEnergy += energy.act_energy * memory.devicesPerRank;
    preEnergy += energy.pre_energy * memory.devicesPerRank;
    readEnergy += energy.read_energy * memory.devicesPerRank;
    writeEnergy += energy.write_energy * memory.devicesPerRank;
    refreshEnergy += energy.ref_energy * memory.devicesPerRank;
    actBackEnergy += energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy += energy.pre_stdby_energy * memory.devicesPerRank;
    actPowerDownEnergy += energy.f_act_pd_energy * memory.devicesPerRank;
    prePowerDownEnergy += energy.f_pre_pd_energy * memory.devicesPerRank;
    selfRefreshEnergy += energy.sref_energy * memory.devicesPerRank;

    // Accumulate window energy into the total energy.
    totalEnergy += energy.window_energy * memory.devicesPerRank;

    // Average power must not be accumulated but calculated over the time
    // since the last stats reset. Note that SimClock::Frequency is the
    // number of ticks per second, so dividing by 1e9 converts pW to mW:
    //
    //                 energy (pJ)    SimClock::Frequency (ticks/s)
    //   power (mW) = ------------ * ------------------------------
    //                 time (ticks)              1e9
    averagePower = (totalEnergy.value() /
                    (curTick() - memory.lastStatsResetTick)) *
                   (SimClock::Frequency / 1000000000.0);
}

void
DRAMCtrl::Rank::computeStats()
{
    DPRINTF(DRAM,"Computing stats due to a dump callback\n");

    // Update the stats
    updatePowerStats();

    // final update of power state times
    pwrStateTime[pwrState] += (curTick() - pwrStateTick);
    pwrStateTick = curTick();
}

void
DRAMCtrl::Rank::resetStats()
{
    // The only way to clear the counters in DRAMPower is to call the
    // calcWindowEnergy function, as that then calls clearCounters. The
    // clearCounters method itself is private.
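    // (The call below therefore both closes the current energy window
    // and clears DRAMPower's internal counters; the window energy it
    // computes is intentionally discarded here.)
    //
    // For the average-power conversion in updatePowerStats() above, a
    // worked example with illustrative numbers: 50,000 pJ accumulated
    // over 10us of simulated time (1e7 ticks at 1ps per tick) gives
    //   (5e4 pJ / 1e7 ticks) * (1e12 ticks/s / 1e9) = 5 mW.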
    power.powerlib.calcWindowEnergy(divCeil(curTick(), memory.tCK) -
                                    memory.timeStampOffset);
}

void
DRAMCtrl::Rank::regStats()
{
    pwrStateTime
        .init(6)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "SREF");
    pwrStateTime.subname(3, "PRE_PDN");
    pwrStateTime.subname(4, "ACT");
    pwrStateTime.subname(5, "ACT_PDN");

    actEnergy
        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

    preEnergy
        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

    readEnergy
        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

    writeEnergy
        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

    refreshEnergy
        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

    actBackEnergy
        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

    preBackEnergy
        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

    actPowerDownEnergy
        .name(name() + ".actPowerDownEnergy")
        .desc("Energy for active power-down per rank (pJ)");

    prePowerDownEnergy
        .name(name() + ".prePowerDownEnergy")
        .desc("Energy for precharge power-down per rank (pJ)");

    selfRefreshEnergy
        .name(name() + ".selfRefreshEnergy")
        .desc("Energy for self refresh per rank (pJ)");

    totalEnergy
        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

    averagePower
        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");

    totalIdleTime
        .name(name() + ".totalIdleTime")
        .desc("Total Idle time Per DRAM Rank");

    Stats::registerDumpCallback(new RankDumpCallback(this));
    Stats::registerResetCallback(new RankResetCallback(this));
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;

    MemCtrl::regStats();

    for (auto r : ranks) {
        r->regStats();
    }

    registerResetCallback(new MemResetCallback(this));

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);
    avgQLat = totQLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow ? maxAccessesPerRow : rowBufferSize)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");
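    // Worked example for the peakBW formula above (illustrative, assuming
    // a DDR3-1600 x64 channel): burstSize = 64 bytes and tBURST = 5ns,
    // i.e. 5000 ticks at 1ps per tick, gives
    //   (1e12 / 5000) * 64 / 1e6 = 12800 MByte/s.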
".avgGap") .desc("Average gap between requests") .precision(2); avgGap = totGap / (readReqs + writeReqs); // Stats for DRAM Power calculation based on Micron datasheet busUtilRead .name(name() + ".busUtilRead") .desc("Data bus utilization in percentage for reads") .precision(2); busUtilRead = avgRdBW / peakBW * 100; busUtilWrite .name(name() + ".busUtilWrite") .desc("Data bus utilization in percentage for writes") .precision(2); busUtilWrite = avgWrBW / peakBW * 100; pageHitRate .name(name() + ".pageHitRate") .desc("Row buffer hit rate, read and write combined") .precision(2); pageHitRate = (writeRowHits + readRowHits) / (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100; // per-master bytes read and written to memory masterReadBytes .init(_system->maxMasters()) .name(name() + ".masterReadBytes") .desc("Per-master bytes read from memory") .flags(nozero | nonan); masterWriteBytes .init(_system->maxMasters()) .name(name() + ".masterWriteBytes") .desc("Per-master bytes write to memory") .flags(nozero | nonan); // per-master bytes read and written to memory rate masterReadRate.name(name() + ".masterReadRate") .desc("Per-master bytes read from memory rate (Bytes/sec)") .flags(nozero | nonan) .precision(12); masterReadRate = masterReadBytes/simSeconds; masterWriteRate .name(name() + ".masterWriteRate") .desc("Per-master bytes write to memory rate (Bytes/sec)") .flags(nozero | nonan) .precision(12); masterWriteRate = masterWriteBytes/simSeconds; masterReadAccesses .init(_system->maxMasters()) .name(name() + ".masterReadAccesses") .desc("Per-master read serviced memory accesses") .flags(nozero); masterWriteAccesses .init(_system->maxMasters()) .name(name() + ".masterWriteAccesses") .desc("Per-master write serviced memory accesses") .flags(nozero); masterReadTotalLat .init(_system->maxMasters()) .name(name() + ".masterReadTotalLat") .desc("Per-master read total memory access latency") .flags(nozero | nonan); masterReadAvgLat.name(name() + ".masterReadAvgLat") .desc("Per-master read average memory access latency") .flags(nonan) .precision(2); masterReadAvgLat = masterReadTotalLat/masterReadAccesses; masterWriteTotalLat .init(_system->maxMasters()) .name(name() + ".masterWriteTotalLat") .desc("Per-master write total memory access latency") .flags(nozero | nonan); masterWriteAvgLat.name(name() + ".masterWriteAvgLat") .desc("Per-master write average memory access latency") .flags(nonan) .precision(2); masterWriteAvgLat = masterWriteTotalLat/masterWriteAccesses; for (int i = 0; i < _system->maxMasters(); i++) { const std::string master = _system->getMasterName(i); masterReadBytes.subname(i, master); masterReadRate.subname(i, master); masterWriteBytes.subname(i, master); masterWriteRate.subname(i, master); masterReadAccesses.subname(i, master); masterWriteAccesses.subname(i, master); masterReadTotalLat.subname(i, master); masterReadAvgLat.subname(i, master); masterWriteTotalLat.subname(i, master); masterWriteAvgLat.subname(i, master); } } void DRAMCtrl::recvFunctional(PacketPtr pkt) { // rely on the abstract memory functionalAccess(pkt); } BaseSlavePort& DRAMCtrl::getSlavePort(const string &if_name, PortID idx) { if (if_name != "port") { return MemObject::getSlavePort(if_name, idx); } else { return port; } } DrainState DRAMCtrl::drain() { // if there is anything in any of our internal queues, keep track // of that as well if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() && allRanksDrained())) { DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d," " resp: 
%d\n", totalWriteQueueSize, totalReadQueueSize, respQueue.size()); // the only queue that is not drained automatically over time // is the write queue, thus kick things into action if needed if (!totalWriteQueueSize && !nextReqEvent.scheduled()) { schedule(nextReqEvent, curTick()); } // also need to kick off events to exit self-refresh for (auto r : ranks) { // force self-refresh exit, which in turn will issue auto-refresh if (r->pwrState == PWR_SREF) { DPRINTF(DRAM,"Rank%d: Forcing self-refresh wakeup in drain\n", r->rank); r->scheduleWakeUpEvent(tXS); } } return DrainState::Draining; } else { return DrainState::Drained; } } bool DRAMCtrl::allRanksDrained() const { // true until proven false bool all_ranks_drained = true; for (auto r : ranks) { // then verify that the power state is IDLE ensuring all banks are // closed and rank is not in a low power state. Also verify that rank // is idle from a refresh point of view. all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() && all_ranks_drained; } return all_ranks_drained; } void DRAMCtrl::drainResume() { if (!isTimingMode && system()->isTimingMode()) { // if we switched to timing mode, kick things into action, // and behave as if we restored from a checkpoint startup(); } else if (isTimingMode && !system()->isTimingMode()) { // if we switch from timing mode, stop the refresh events to // not cause issues with KVM for (auto r : ranks) { r->suspend(); } } // update the mode isTimingMode = system()->isTimingMode(); } DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory) : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this), memory(_memory) { } AddrRangeList DRAMCtrl::MemoryPort::getAddrRanges() const { AddrRangeList ranges; ranges.push_back(memory.getAddrRange()); return ranges; } void DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt) { pkt->pushLabel(memory.name()); if (!queue.trySatisfyFunctional(pkt)) { // Default implementation of SimpleTimingPort::recvFunctional() // calls recvAtomic() and throws away the latency; we can save a // little here by just not calculating the latency. memory.recvFunctional(pkt); } pkt->popLabel(); } Tick DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt) { return memory.recvAtomic(pkt); } bool DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt) { // pass it to the memory controller return memory.recvTimingReq(pkt); } DRAMCtrl* DRAMCtrlParams::create() { return new DRAMCtrl(this); }