From 041ea8107e4250a9c120a6fde11f3dc415c2fe6a Mon Sep 17 00:00:00 2001
From: Andreas Hansson <andreas.hansson@arm.com>
Date: Thu, 17 Mar 2016 09:51:18 -0400
Subject: mem: Create a separate class for the cache write buffer

This patch breaks out the cache write buffer into a separate class,
without affecting any stats. The goal of the patch is to avoid
encumbering the much-simpler write queue with the complex MSHR
handling. In a follow-on patch this simplification allows us to
implement write combining.

The WriteQueue gets its own class, but shares a common ancestor, the
generic Queue, with the MSHRQueue.
---
 src/mem/cache/mshr_queue.cc | 146 +++-----------------------------------------
 1 file changed, 10 insertions(+), 136 deletions(-)

(limited to 'src/mem/cache/mshr_queue.cc')

diff --git a/src/mem/cache/mshr_queue.cc b/src/mem/cache/mshr_queue.cc
index ed6769349..f4992e176 100644
--- a/src/mem/cache/mshr_queue.cc
+++ b/src/mem/cache/mshr_queue.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, 2015 ARM Limited
+ * Copyright (c) 2012-2013, 2015-2016 ARM Limited
  * All rights reserved.
  *
  * The license below extends only to copyright in the software and shall
@@ -45,104 +45,15 @@
  * Definition of MSHRQueue class functions.
  */
 
-#include "base/trace.hh"
 #include "mem/cache/mshr_queue.hh"
-#include "debug/Drain.hh"
 
 using namespace std;
 
 MSHRQueue::MSHRQueue(const std::string &_label,
-                     int num_entries, int reserve, int demand_reserve,
-                     int _index)
-    : label(_label), numEntries(num_entries + reserve - 1),
-      numReserve(reserve), demandReserve(demand_reserve),
-      registers(numEntries), allocated(0),
-      inServiceEntries(0), index(_index)
-{
-    for (int i = 0; i < numEntries; ++i) {
-        registers[i].queue = this;
-        freeList.push_back(&registers[i]);
-    }
-}
-
-MSHR *
-MSHRQueue::findMatch(Addr blk_addr, bool is_secure) const
-{
-    for (const auto& mshr : allocatedList) {
-        // we ignore any MSHRs allocated for uncacheable accesses and
-        // simply ignore them when matching, in the cache we never
-        // check for matches when adding new uncacheable entries, and
-        // we do not want normal cacheable accesses being added to an
-        // MSHR serving an uncacheable access
-        if (!mshr->isUncacheable() && mshr->blkAddr == blk_addr &&
-            mshr->isSecure == is_secure) {
-            return mshr;
-        }
-    }
-    return NULL;
-}
-
-bool
-MSHRQueue::findMatches(Addr blk_addr, bool is_secure,
-                       vector<MSHR*>& matches) const
-{
-    // Need an empty vector
-    assert(matches.empty());
-    bool retval = false;
-    for (const auto& mshr : allocatedList) {
-        if (!mshr->isUncacheable() && mshr->blkAddr == blk_addr &&
-            mshr->isSecure == is_secure) {
-            retval = true;
-            matches.push_back(mshr);
-        }
-    }
-    return retval;
-}
-
-
-bool
-MSHRQueue::checkFunctional(PacketPtr pkt, Addr blk_addr)
-{
-    pkt->pushLabel(label);
-    for (const auto& mshr : allocatedList) {
-        if (mshr->blkAddr == blk_addr && mshr->checkFunctional(pkt)) {
-            pkt->popLabel();
-            return true;
-        }
-    }
-    pkt->popLabel();
-    return false;
-}
-
-
-MSHR *
-MSHRQueue::findPending(Addr blk_addr, bool is_secure) const
-{
-    for (const auto& mshr : readyList) {
-        if (mshr->blkAddr == blk_addr && mshr->isSecure == is_secure) {
-            return mshr;
-        }
-    }
-    return NULL;
-}
-
-
-MSHR::Iterator
-MSHRQueue::addToReadyList(MSHR *mshr)
-{
-    if (readyList.empty() || readyList.back()->readyTime <= mshr->readyTime) {
-        return readyList.insert(readyList.end(), mshr);
-    }
-
-    for (auto i = readyList.begin(); i != readyList.end(); ++i) {
-        if ((*i)->readyTime > mshr->readyTime) {
-            return readyList.insert(i, mshr);
-        }
-    }
-    assert(false);
-    return readyList.end();  // keep stupid compilers happy
-}
-
+                     int num_entries, int reserve, int demand_reserve)
+    : Queue<MSHR>(_label, num_entries, reserve),
+      demandReserve(demand_reserve)
+{}
 
 MSHR *
 MSHRQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
@@ -161,34 +72,6 @@ MSHRQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
     return mshr;
 }
 
-
-void
-MSHRQueue::deallocate(MSHR *mshr)
-{
-    deallocateOne(mshr);
-}
-
-MSHR::Iterator
-MSHRQueue::deallocateOne(MSHR *mshr)
-{
-    MSHR::Iterator retval = allocatedList.erase(mshr->allocIter);
-    freeList.push_front(mshr);
-    allocated--;
-    if (mshr->inService) {
-        inServiceEntries--;
-    } else {
-        readyList.erase(mshr->readyIter);
-    }
-    mshr->deallocate();
-    if (drainState() == DrainState::Draining && allocated == 0) {
-        // Notify the drain manager that we have completed draining if
-        // there are no other outstanding requests in this MSHR queue.
-        DPRINTF(Drain, "MSHRQueue now empty, signalling drained\n");
-        signalDrainDone();
-    }
-    return retval;
-}
-
 void
 MSHRQueue::moveToFront(MSHR *mshr)
 {
@@ -202,12 +85,9 @@ MSHRQueue::moveToFront(MSHR *mshr)
 void
 MSHRQueue::markInService(MSHR *mshr, bool pending_modified_resp)
 {
-    if (mshr->markInService(pending_modified_resp)) {
-        deallocate(mshr);
-    } else {
-        readyList.erase(mshr->readyIter);
-        inServiceEntries += 1;
-    }
+    mshr->markInService(pending_modified_resp);
+    readyList.erase(mshr->readyIter);
+    _numInService += 1;
 }
 
 void
@@ -215,7 +95,7 @@ MSHRQueue::markPending(MSHR *mshr)
 {
     assert(mshr->inService);
     mshr->inService = false;
-    --inServiceEntries;
+    --_numInService;
     /**
      * @ todo might want to add rerequests to front of pending list for
      * performance.
@@ -232,15 +112,9 @@ MSHRQueue::forceDeallocateTarget(MSHR *mshr)
         mshr->popTarget();
     // Delete mshr if no remaining targets
     if (!mshr->hasTargets() && !mshr->promoteDeferredTargets()) {
-        deallocateOne(mshr);
+        deallocate(mshr);
     }
 
     // Notify if MSHR queue no longer full
     return was_full && !isFull();
 }
-
-DrainState
-MSHRQueue::drain()
-{
-    return allocated == 0 ? DrainState::Drained : DrainState::Draining;
-}
--
cgit v1.2.3
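
For context, the class split the commit message describes can be pictured as
follows. This is a minimal sketch, not the actual gem5 headers: the stand-in
Entry type, the simplified bookkeeping in deallocate(), and the reserve
arithmetic are illustrative assumptions; only the constructor signatures, the
allocatedList/readyList/freeList members, and the _numInService counter are
taken from the diff above (the real base class also handles draining and
functional accesses).

    #include <list>
    #include <string>
    #include <vector>

    // Stand-in for the queue entry type (an MSHR or a write-queue entry);
    // only the one field this sketch needs.
    struct Entry
    {
        bool inService = false;
    };

    // Generic Queue: the common ancestor holding the free/ready/allocated
    // bookkeeping that used to live entirely in MSHRQueue.
    template <class E>
    class Queue
    {
      protected:
        const std::string label;    // label used for functional accesses
        const int numEntries;       // total entries, including the reserve
        const int numReserve;       // kept free so responses can allocate
        std::vector<E> entries;     // actual storage
        std::list<E*> allocatedList;
        std::list<E*> readyList;
        std::list<E*> freeList;
        int allocated = 0;
        int _numInService = 0;      // the counter the diff renames

      public:
        Queue(const std::string &_label, int num_entries, int reserve)
            : label(_label), numEntries(num_entries + reserve),
              numReserve(reserve), entries(numEntries)
        {
            // initially every entry is on the free list
            for (auto &e : entries)
                freeList.push_back(&e);
        }

        bool isFull() const { return allocated >= numEntries - numReserve; }

        // Shared deallocation, roughly what MSHRQueue::deallocateOne() did;
        // drain signalling is omitted for brevity.
        void deallocate(E *entry)
        {
            allocatedList.remove(entry);
            freeList.push_front(entry);
            if (entry->inService)
                --_numInService;
            else
                readyList.remove(entry);
            --allocated;
        }
    };

    // MSHRQueue now only adds the demand reserve on top of the generic
    // Queue, matching the rewritten constructor in the diff.
    class MSHRQueue : public Queue<Entry>
    {
        const int demandReserve;    // entries reserved for demand misses

      public:
        MSHRQueue(const std::string &_label, int num_entries, int reserve,
                  int demand_reserve)
            : Queue<Entry>(_label, num_entries, reserve),
              demandReserve(demand_reserve)
        {}
    };

    // The write buffer becomes a sibling class with none of the MSHR
    // target/deferred-target machinery.
    class WriteQueue : public Queue<Entry>
    {
      public:
        WriteQueue(const std::string &_label, int num_entries, int reserve)
            : Queue<Entry>(_label, num_entries, reserve)
        {}
    };

With the shared bookkeeping hoisted into the base class, the follow-on
write-combining patch the message mentions only has to extend the small
WriteQueue side rather than the MSHR machinery.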