author     David Guillen <david.guillen@arm.com>    2015-05-05 03:22:21 -0400
committer  David Guillen <david.guillen@arm.com>    2015-05-05 03:22:21 -0400
commit     5287945a8bb98476a9326c5d9c51491cdc7212f2 (patch)
tree       c2263df9baa298e151c2fc68c22b9e3439f07edf /src
parent     d0d933facc9085727c12f53de76a2cb879ded4c8 (diff)
download   gem5-5287945a8bb98476a9326c5d9c51491cdc7212f2.tar.xz
mem: Remove templates in cache model
This patch changes the cache implementation to rely on virtual methods rather than using the replacement policy as a template argument. There is no impact on simulation performance, and overall the changes make it easier to modify (and subclass) the cache and/or the replacement policy.
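
Editor's note: as a rough, standalone illustration of the design this patch moves to (selecting the tag/replacement policy through a virtual base-class interface at run time instead of instantiating Cache<FALRU>, Cache<LRU>, etc. at compile time), here is a minimal C++ sketch. The names Blk, BlkVisitor, Tags, LruTags, IsDirtyVisitor and UnifiedCache are simplified stand-ins, not the gem5 classes; they loosely correspond to CacheBlk, CacheBlkVisitor, BaseTags, BaseSetAssoc/LRU, CacheBlkIsDirtyVisitor and Cache in the diff below.

#include <cassert>
#include <iostream>

struct Blk                                // cf. CacheBlk
{
    bool valid = false;
    bool dirty = false;
};

struct BlkVisitor                         // cf. CacheBlkVisitor in blk.hh
{
    virtual ~BlkVisitor() = default;
    // Return false to stop the iteration early.
    virtual bool operator()(Blk &blk) = 0;
};

struct Tags                               // cf. BaseTags in tags/base.hh
{
    virtual ~Tags() = default;
    virtual Blk *findVictim() = 0;
    virtual void forEachBlk(BlkVisitor &visitor) = 0;
};

struct LruTags : Tags                     // cf. LRU / BaseSetAssoc
{
    Blk blks[4];
    Blk *findVictim() override { return &blks[0]; }
    void forEachBlk(BlkVisitor &visitor) override
    {
        for (Blk &blk : blks)
            if (!visitor(blk))
                return;
    }
};

struct IsDirtyVisitor : BlkVisitor        // cf. CacheBlkIsDirtyVisitor
{
    bool dirty = false;
    bool operator()(Blk &blk) override
    {
        if (blk.dirty) { dirty = true; return false; }
        return true;
    }
};

struct UnifiedCache                       // cf. the de-templated Cache
{
    Tags *tags;                           // virtual dispatch, not a template argument
    explicit UnifiedCache(Tags *t) : tags(t) {}

    bool isDirty()
    {
        IsDirtyVisitor v;
        tags->forEachBlk(v);              // iteration goes through the base interface
        return v.dirty;
    }
};

int main()
{
    LruTags lru;
    UnifiedCache cache(&lru);             // policy chosen at run time
    assert(!cache.isDirty());
    lru.blks[2].dirty = true;
    assert(cache.isDirty());
    std::cout << "visitor dispatched through the abstract tags interface\n";
}

The same trade-off shows up in the first hunk below: BaseCacheParams::create() no longer needs a dynamic_cast chain to pick a template instantiation and simply returns new Cache(this).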
Diffstat (limited to 'src')
-rw-r--r--  src/mem/cache/base.cc                 |  16
-rw-r--r--  src/mem/cache/blk.hh                  |  60
-rw-r--r--  src/mem/cache/cache.cc                |   8
-rw-r--r--  src/mem/cache/cache.hh                | 106
-rw-r--r--  src/mem/cache/cache_impl.hh           | 190
-rw-r--r--  src/mem/cache/tags/base.hh            |  33
-rw-r--r--  src/mem/cache/tags/base_set_assoc.cc  |   2
-rw-r--r--  src/mem/cache/tags/base_set_assoc.hh  |  23
-rw-r--r--  src/mem/cache/tags/fa_lru.cc          |  16
-rw-r--r--  src/mem/cache/tags/fa_lru.hh          |  33
-rw-r--r--  src/mem/cache/tags/lru.cc             |  10
-rw-r--r--  src/mem/cache/tags/lru.hh             |   6
-rw-r--r--  src/mem/cache/tags/random_repl.cc     |  10
-rw-r--r--  src/mem/cache/tags/random_repl.hh     |   6
14 files changed, 240 insertions, 279 deletions
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index b474aeedc..a2443eaad 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -783,21 +783,7 @@ BaseCache::drain(DrainManager *dm)
BaseCache *
BaseCacheParams::create()
{
- unsigned numSets = size / (assoc * system->cacheLineSize());
-
assert(tags);
- if (dynamic_cast<FALRU*>(tags)) {
- if (numSets != 1)
- fatal("Got FALRU tags with more than one set\n");
- return new Cache<FALRU>(this);
- } else if (dynamic_cast<LRU*>(tags)) {
- if (numSets == 1)
- warn("Consider using FALRU tags for a fully associative cache\n");
- return new Cache<LRU>(this);
- } else if (dynamic_cast<RandomRepl*>(tags)) {
- return new Cache<RandomRepl>(this);
- } else {
- fatal("No suitable tags selected\n");
- }
+ return new Cache(this);
}
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index 6c72817c0..09599b694 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -397,63 +397,19 @@ class CacheBlkPrintWrapper : public Printable
};
/**
- * Wrap a method and present it as a cache block visitor.
- *
- * For example the forEachBlk method in the tag arrays expects a
- * callable object/function as their parameter. This class wraps a
- * method in an object and presents callable object that adheres to
- * the cache block visitor protocol.
- */
-template <typename T, typename BlkType>
-class CacheBlkVisitorWrapper
-{
- public:
- typedef bool (T::*visitorPtr)(BlkType &blk);
-
- CacheBlkVisitorWrapper(T &_obj, visitorPtr _visitor)
- : obj(_obj), visitor(_visitor) {}
-
- bool operator()(BlkType &blk) {
- return (obj.*visitor)(blk);
- }
-
- private:
- T &obj;
- visitorPtr visitor;
-};
-
-/**
- * Cache block visitor that determines if there are dirty blocks in a
- * cache.
- *
- * Use with the forEachBlk method in the tag array to determine if the
- * array contains dirty blocks.
+ * Base class for cache block visitor, operating on the cache block
+ * base class (later subclassed for the various tag classes). This
+ * visitor class is used as part of the forEachBlk interface in the
+ * tag classes.
*/
-template <typename BlkType>
-class CacheBlkIsDirtyVisitor
+class CacheBlkVisitor
{
public:
- CacheBlkIsDirtyVisitor()
- : _isDirty(false) {}
-
- bool operator()(BlkType &blk) {
- if (blk.isDirty()) {
- _isDirty = true;
- return false;
- } else {
- return true;
- }
- }
- /**
- * Does the array contain a dirty line?
- *
- * \return true if yes, false otherwise.
- */
- bool isDirty() const { return _isDirty; };
+ CacheBlkVisitor() {}
+ virtual ~CacheBlkVisitor() {}
- private:
- bool _isDirty;
+ virtual bool operator()(CacheBlk &blk) = 0;
};
#endif //__CACHE_BLK_HH__
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index a5fa0e60d..2bf5a260c 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -41,11 +41,3 @@
#include "mem/cache/tags/random_repl.hh"
#include "mem/cache/cache_impl.hh"
-// Template Instantiations
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
-
-template class Cache<FALRU>;
-template class Cache<LRU>;
-template class Cache<RandomRepl>;
-
-#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 7971c6654..24a067ece 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -56,6 +56,7 @@
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
+#include "mem/cache/tags/base.hh"
#include "sim/eventq.hh"
//Forward decleration
@@ -66,17 +67,14 @@ class BasePrefetcher;
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
*/
-template <class TagStore>
class Cache : public BaseCache
{
public:
- /** Define the type of cache block to use. */
- typedef typename TagStore::BlkType BlkType;
- /** A typedef for a list of BlkType pointers. */
- typedef typename TagStore::BlkList BlkList;
+
+ /** A typedef for a list of CacheBlk pointers. */
+ typedef std::list<CacheBlk*> BlkList;
protected:
- typedef CacheBlkVisitorWrapper<Cache<TagStore>, BlkType> WrappedBlkVisitor;
/**
* The CPU-side port extends the base cache slave port with access
@@ -87,7 +85,7 @@ class Cache : public BaseCache
private:
// a pointer to our specific cache implementation
- Cache<TagStore> *cache;
+ Cache *cache;
protected:
@@ -103,7 +101,7 @@ class Cache : public BaseCache
public:
- CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
+ CpuSidePort(const std::string &_name, Cache *_cache,
const std::string &_label);
};
@@ -119,12 +117,12 @@ class Cache : public BaseCache
protected:
- Cache<TagStore> &cache;
+ Cache &cache;
SnoopRespPacketQueue &snoopRespQueue;
public:
- CacheReqPacketQueue(Cache<TagStore> &cache, MasterPort &port,
+ CacheReqPacketQueue(Cache &cache, MasterPort &port,
SnoopRespPacketQueue &snoop_resp_queue,
const std::string &label) :
ReqPacketQueue(cache, port, label), cache(cache),
@@ -153,7 +151,7 @@ class Cache : public BaseCache
SnoopRespPacketQueue _snoopRespQueue;
// a pointer to our specific cache implementation
- Cache<TagStore> *cache;
+ Cache *cache;
protected:
@@ -167,18 +165,18 @@ class Cache : public BaseCache
public:
- MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
+ MemSidePort(const std::string &_name, Cache *_cache,
const std::string &_label);
};
/** Tag and data Storage */
- TagStore *tags;
+ BaseTags *tags;
/** Prefetcher */
BasePrefetcher *prefetcher;
/** Temporary cache block for occasional transitory use */
- BlkType *tempBlock;
+ CacheBlk *tempBlock;
/**
* This cache should allocate a block on a line-sized write miss.
@@ -210,13 +208,13 @@ class Cache : public BaseCache
* @param writebacks List for any writebacks that need to be performed.
* @return Boolean indicating whether the request was satisfied.
*/
- bool access(PacketPtr pkt, BlkType *&blk,
+ bool access(PacketPtr pkt, CacheBlk *&blk,
Cycles &lat, PacketList &writebacks);
/**
*Handle doing the Compare and Swap function for SPARC.
*/
- void cmpAndSwap(BlkType *blk, PacketPtr pkt);
+ void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
/**
* Find a block frame for new block at address addr targeting the
@@ -225,7 +223,7 @@ class Cache : public BaseCache
* list. Return free block frame. May return NULL if there are
* no replaceable blocks at the moment.
*/
- BlkType *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
+ CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
/**
* Populates a cache block and handles all outstanding requests for the
@@ -236,7 +234,7 @@ class Cache : public BaseCache
* @param writebacks List for any writebacks that need to be performed.
* @return Pointer to the new cache block.
*/
- BlkType *handleFill(PacketPtr pkt, BlkType *blk,
+ CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
PacketList &writebacks);
@@ -287,10 +285,10 @@ class Cache : public BaseCache
*/
void functionalAccess(PacketPtr pkt, bool fromCpuSide);
- void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
+ void satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
bool deferred_response = false,
bool pending_downgrade = false);
- bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk);
+ bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, CacheBlk *blk);
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
bool already_copied, bool pending_inval);
@@ -300,7 +298,7 @@ class Cache : public BaseCache
* @param blk The cache block being snooped.
* @param new_state The new coherence state for the block.
*/
- void handleSnoop(PacketPtr ptk, BlkType *blk,
+ void handleSnoop(PacketPtr ptk, CacheBlk *blk,
bool is_timing, bool is_deferred, bool pending_inval);
/**
@@ -308,7 +306,7 @@ class Cache : public BaseCache
* @param blk The block to writeback.
* @return The writeback request for the block.
*/
- PacketPtr writebackBlk(BlkType *blk);
+ PacketPtr writebackBlk(CacheBlk *blk);
void memWriteback();
@@ -321,7 +319,7 @@ class Cache : public BaseCache
*
* \return Always returns true.
*/
- bool writebackVisitor(BlkType &blk);
+ bool writebackVisitor(CacheBlk &blk);
/**
* Cache block visitor that invalidates all blocks in the cache.
*
@@ -329,7 +327,7 @@ class Cache : public BaseCache
*
* \return Always returns true.
*/
- bool invalidateVisitor(BlkType &blk);
+ bool invalidateVisitor(CacheBlk &blk);
/**
* Squash all requests associated with specified thread.
@@ -349,7 +347,7 @@ class Cache : public BaseCache
* @return A new Packet containing the request, or NULL if the
* current request in cpu_pkt should just be forwarded on.
*/
- PacketPtr getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
+ PacketPtr getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
bool needsExclusive) const;
/**
@@ -417,4 +415,62 @@ class Cache : public BaseCache
void unserialize(Checkpoint *cp, const std::string &section);
};
+/**
+ * Wrap a method and present it as a cache block visitor.
+ *
+ * For example the forEachBlk method in the tag arrays expects a
+ * callable object/function as their parameter. This class wraps a
+ * method in an object and presents callable object that adheres to
+ * the cache block visitor protocol.
+ */
+class CacheBlkVisitorWrapper : public CacheBlkVisitor
+{
+ public:
+ typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);
+
+ CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
+ : cache(_cache), visitor(_visitor) {}
+
+ bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE {
+ return (cache.*visitor)(blk);
+ }
+
+ private:
+ Cache &cache;
+ VisitorPtr visitor;
+};
+
+/**
+ * Cache block visitor that determines if there are dirty blocks in a
+ * cache.
+ *
+ * Use with the forEachBlk method in the tag array to determine if the
+ * array contains dirty blocks.
+ */
+class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
+{
+ public:
+ CacheBlkIsDirtyVisitor()
+ : _isDirty(false) {}
+
+ bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE {
+ if (blk.isDirty()) {
+ _isDirty = true;
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /**
+ * Does the array contain a dirty line?
+ *
+ * \return true if yes, false otherwise.
+ */
+ bool isDirty() const { return _isDirty; };
+
+ private:
+ bool _isDirty;
+};
+
#endif // __CACHE_HH__
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index b897a8467..a530001ae 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -65,15 +65,14 @@
#include "mem/cache/mshr.hh"
#include "sim/sim_exit.hh"
-template<class TagStore>
-Cache<TagStore>::Cache(const Params *p)
+Cache::Cache(const Params *p)
: BaseCache(p),
- tags(dynamic_cast<TagStore*>(p->tags)),
+ tags(p->tags),
prefetcher(p->prefetcher),
doFastWrites(true),
prefetchOnAccess(p->prefetch_on_access)
{
- tempBlock = new BlkType();
+ tempBlock = new CacheBlk();
tempBlock->data = new uint8_t[blkSize];
cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
@@ -86,8 +85,7 @@ Cache<TagStore>::Cache(const Params *p)
prefetcher->setCache(this);
}
-template<class TagStore>
-Cache<TagStore>::~Cache()
+Cache::~Cache()
{
delete [] tempBlock->data;
delete tempBlock;
@@ -96,16 +94,14 @@ Cache<TagStore>::~Cache()
delete memSidePort;
}
-template<class TagStore>
void
-Cache<TagStore>::regStats()
+Cache::regStats()
{
BaseCache::regStats();
}
-template<class TagStore>
void
-Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
+Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
assert(pkt->isRequest());
@@ -145,11 +141,9 @@ Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
}
-template<class TagStore>
void
-Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
- bool deferred_response,
- bool pending_downgrade)
+Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
+ bool deferred_response, bool pending_downgrade)
{
assert(pkt->isRequest());
@@ -254,9 +248,8 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
/////////////////////////////////////////////////////
-template<class TagStore>
void
-Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp)
+Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
markInServiceInternal(mshr, pending_dirty_resp);
#if 0
@@ -272,9 +265,8 @@ Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp)
}
-template<class TagStore>
void
-Cache<TagStore>::squash(int threadNum)
+Cache::squash(int threadNum)
{
bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
@@ -300,10 +292,9 @@ Cache<TagStore>::squash(int threadNum)
//
/////////////////////////////////////////////////////
-template<class TagStore>
bool
-Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
- Cycles &lat, PacketList &writebacks)
+Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
+ PacketList &writebacks)
{
// sanity check
assert(pkt->isRequest());
@@ -319,7 +310,7 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
tags->clearLocks();
// flush and invalidate any existing block
- BlkType *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
+ CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
if (old_blk && old_blk->isValid()) {
if (old_blk->isDirty())
writebacks.push_back(writebackBlk(old_blk));
@@ -403,9 +394,8 @@ class ForwardResponseRecord : public Packet::SenderState
ForwardResponseRecord() {}
};
-template<class TagStore>
void
-Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt)
+Cache::recvTimingSnoopResp(PacketPtr pkt)
{
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -442,9 +432,8 @@ Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt)
memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}
-template<class TagStore>
void
-Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt)
+Cache::promoteWholeLineWrites(PacketPtr pkt)
{
// Cache line clearing instructions
if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
@@ -455,9 +444,8 @@ Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt)
}
}
-template<class TagStore>
bool
-Cache<TagStore>::recvTimingReq(PacketPtr pkt)
+Cache::recvTimingReq(PacketPtr pkt)
{
DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
//@todo Add back in MemDebug Calls
@@ -540,7 +528,7 @@ Cache<TagStore>::recvTimingReq(PacketPtr pkt)
// We use lookupLatency here because it is used to specify the latency
// to access.
Cycles lat = lookupLatency;
- BlkType *blk = NULL;
+ CacheBlk *blk = NULL;
bool satisfied = false;
{
PacketList writebacks;
@@ -777,10 +765,9 @@ Cache<TagStore>::recvTimingReq(PacketPtr pkt)
// See comment in cache.hh.
-template<class TagStore>
PacketPtr
-Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
- bool needsExclusive) const
+Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
+ bool needsExclusive) const
{
bool blkValid = blk && blk->isValid();
@@ -840,9 +827,8 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
}
-template<class TagStore>
Tick
-Cache<TagStore>::recvAtomic(PacketPtr pkt)
+Cache::recvAtomic(PacketPtr pkt)
{
// We are in atomic mode so we pay just for lookupLatency here.
Cycles lat = lookupLatency;
@@ -860,7 +846,7 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
// have to invalidate ourselves and any lower caches even if
// upper cache will be responding
if (pkt->isInvalidate()) {
- BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
if (blk && blk->isValid()) {
tags->invalidate(blk);
blk->invalidate();
@@ -887,7 +873,7 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
// writebacks... that would mean that someone used an atomic
// access in timing mode
- BlkType *blk = NULL;
+ CacheBlk *blk = NULL;
PacketList writebacks;
bool satisfied = access(pkt, blk, lat, writebacks);
@@ -990,9 +976,8 @@ Cache<TagStore>::recvAtomic(PacketPtr pkt)
}
-template<class TagStore>
void
-Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
+Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
if (system->bypassCaches()) {
// Packets from the memory side are snoop request and
@@ -1007,7 +992,7 @@ Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
Addr blk_addr = blockAlign(pkt->getAddr());
bool is_secure = pkt->isSecure();
- BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
pkt->pushLabel(name());
@@ -1067,9 +1052,8 @@ Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
/////////////////////////////////////////////////////
-template<class TagStore>
void
-Cache<TagStore>::recvTimingResp(PacketPtr pkt)
+Cache::recvTimingResp(PacketPtr pkt)
{
assert(pkt->isResponse());
@@ -1099,7 +1083,7 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
// Initial target is used just for stats
MSHR::Target *initial_tgt = mshr->getTarget();
- BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
Tick miss_latency = curTick() - initial_tgt->recvTime;
PacketList writebacks;
@@ -1328,12 +1312,8 @@ Cache<TagStore>::recvTimingResp(PacketPtr pkt)
delete pkt;
}
-
-
-
-template<class TagStore>
PacketPtr
-Cache<TagStore>::writebackBlk(BlkType *blk)
+Cache::writebackBlk(CacheBlk *blk)
{
assert(blk && blk->isValid() && blk->isDirty());
@@ -1360,35 +1340,31 @@ Cache<TagStore>::writebackBlk(BlkType *blk)
return writeback;
}
-template<class TagStore>
void
-Cache<TagStore>::memWriteback()
+Cache::memWriteback()
{
- WrappedBlkVisitor visitor(*this, &Cache<TagStore>::writebackVisitor);
+ CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
tags->forEachBlk(visitor);
}
-template<class TagStore>
void
-Cache<TagStore>::memInvalidate()
+Cache::memInvalidate()
{
- WrappedBlkVisitor visitor(*this, &Cache<TagStore>::invalidateVisitor);
+ CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
tags->forEachBlk(visitor);
}
-template<class TagStore>
bool
-Cache<TagStore>::isDirty() const
+Cache::isDirty() const
{
- CacheBlkIsDirtyVisitor<BlkType> visitor;
+ CacheBlkIsDirtyVisitor visitor;
tags->forEachBlk(visitor);
return visitor.isDirty();
}
-template<class TagStore>
bool
-Cache<TagStore>::writebackVisitor(BlkType &blk)
+Cache::writebackVisitor(CacheBlk &blk)
{
if (blk.isDirty()) {
assert(blk.isValid());
@@ -1408,9 +1384,8 @@ Cache<TagStore>::writebackVisitor(BlkType &blk)
return true;
}
-template<class TagStore>
bool
-Cache<TagStore>::invalidateVisitor(BlkType &blk)
+Cache::invalidateVisitor(CacheBlk &blk)
{
if (blk.isDirty())
@@ -1418,19 +1393,17 @@ Cache<TagStore>::invalidateVisitor(BlkType &blk)
if (blk.isValid()) {
assert(!blk.isDirty());
- tags->invalidate(dynamic_cast< BlkType *>(&blk));
+ tags->invalidate(&blk);
blk.invalidate();
}
return true;
}
-template<class TagStore>
-typename Cache<TagStore>::BlkType*
-Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
- PacketList &writebacks)
+CacheBlk*
+Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
- BlkType *blk = tags->findVictim(addr);
+ CacheBlk *blk = tags->findVictim(addr);
if (blk->isValid()) {
Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
@@ -1465,10 +1438,8 @@ Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
-template<class TagStore>
-typename Cache<TagStore>::BlkType*
-Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
- PacketList &writebacks)
+CacheBlk*
+Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks)
{
assert(pkt->isResponse() || pkt->isWriteInvalidate());
Addr addr = pkt->getAddr();
@@ -1556,11 +1527,9 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
//
/////////////////////////////////////////////////////
-template<class TagStore>
void
-Cache<TagStore>::
-doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
- bool already_copied, bool pending_inval)
+Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
+ bool already_copied, bool pending_inval)
{
// sanity check
assert(req_pkt->isRequest());
@@ -1604,11 +1573,9 @@ doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
}
-template<class TagStore>
void
-Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
- bool is_timing, bool is_deferred,
- bool pending_inval)
+Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
+ bool is_deferred, bool pending_inval)
{
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -1745,9 +1712,8 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
}
-template<class TagStore>
void
-Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
+Cache::recvTimingSnoopReq(PacketPtr pkt)
{
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
@@ -1778,7 +1744,7 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
}
bool is_secure = pkt->isSecure();
- BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
@@ -1849,18 +1815,16 @@ Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
handleSnoop(pkt, blk, true, false, false);
}
-template<class TagStore>
bool
-Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
+Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
// Express snoop responses from master to slave, e.g., from L1 to L2
cache->recvTimingSnoopResp(pkt);
return true;
}
-template<class TagStore>
Tick
-Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt)
+Cache::recvAtomicSnoop(PacketPtr pkt)
{
// Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches());
@@ -1871,16 +1835,15 @@ Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt)
return 0;
}
- BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
handleSnoop(pkt, blk, false, false, false);
// We consider forwardLatency here because a snoop occurs in atomic mode
return forwardLatency * clockPeriod();
}
-template<class TagStore>
MSHR *
-Cache<TagStore>::getNextMSHR()
+Cache::getNextMSHR()
{
// Check both MSHR queue and write buffer for potential requests,
// note that null does not mean there is no request, it could
@@ -1962,9 +1925,8 @@ Cache<TagStore>::getNextMSHR()
}
-template<class TagStore>
PacketPtr
-Cache<TagStore>::getTimingPacket()
+Cache::getTimingPacket()
{
MSHR *mshr = getNextMSHR();
@@ -1984,7 +1946,7 @@ Cache<TagStore>::getTimingPacket()
assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
pkt = tgt_pkt;
} else {
- BlkType *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
+ CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
if (tgt_pkt->cmd == MemCmd::HardPFReq) {
// We need to check the caches above us to verify that
@@ -2056,9 +2018,8 @@ Cache<TagStore>::getTimingPacket()
}
-template<class TagStore>
Tick
-Cache<TagStore>::nextMSHRReadyTime() const
+Cache::nextMSHRReadyTime() const
{
Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
writeBuffer.nextMSHRReadyTime());
@@ -2073,9 +2034,8 @@ Cache<TagStore>::nextMSHRReadyTime() const
return nextReady;
}
-template<class TagStore>
void
-Cache<TagStore>::serialize(std::ostream &os)
+Cache::serialize(std::ostream &os)
{
bool dirty(isDirty());
@@ -2094,9 +2054,8 @@ Cache<TagStore>::serialize(std::ostream &os)
SERIALIZE_SCALAR(bad_checkpoint);
}
-template<class TagStore>
void
-Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
+Cache::unserialize(Checkpoint *cp, const std::string &section)
{
bool bad_checkpoint;
UNSERIALIZE_SCALAR(bad_checkpoint);
@@ -2113,16 +2072,14 @@ Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
//
///////////////
-template<class TagStore>
AddrRangeList
-Cache<TagStore>::CpuSidePort::getAddrRanges() const
+Cache::CpuSidePort::getAddrRanges() const
{
return cache->getAddrRanges();
}
-template<class TagStore>
bool
-Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
+Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
assert(!cache->system->bypassCaches());
@@ -2150,24 +2107,21 @@ Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
return success;
}
-template<class TagStore>
Tick
-Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
+Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
return cache->recvAtomic(pkt);
}
-template<class TagStore>
void
-Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
+Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
// functional request
cache->functionalAccess(pkt, true);
}
-template<class TagStore>
-Cache<TagStore>::
-CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
+Cache::
+CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
const std::string &_label)
: BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{
@@ -2179,33 +2133,29 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
//
///////////////
-template<class TagStore>
bool
-Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
+Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
cache->recvTimingResp(pkt);
return true;
}
// Express snooping requests to memside port
-template<class TagStore>
void
-Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
+Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
// handle snooping requests
cache->recvTimingSnoopReq(pkt);
}
-template<class TagStore>
Tick
-Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
+Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
return cache->recvAtomicSnoop(pkt);
}
-template<class TagStore>
void
-Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
+Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
// functional snoop (note that in contrast to atomic we don't have
// a specific functionalSnoop method, as they have the same
@@ -2213,9 +2163,8 @@ Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
cache->functionalAccess(pkt, false);
}
-template<class TagStore>
void
-Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
+Cache::CacheReqPacketQueue::sendDeferredPacket()
{
// sanity check
assert(!waitingOnRetry);
@@ -2296,9 +2245,8 @@ Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
}
}
-template<class TagStore>
-Cache<TagStore>::
-MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
+Cache::
+MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
const std::string &_label)
: BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
_reqQueue(*_cache, *this, _snoopRespQueue, _label),
diff --git a/src/mem/cache/tags/base.hh b/src/mem/cache/tags/base.hh
index 03b6cfed8..e4c0f68d8 100644
--- a/src/mem/cache/tags/base.hh
+++ b/src/mem/cache/tags/base.hh
@@ -53,6 +53,7 @@
#include "base/callback.hh"
#include "base/statistics.hh"
+#include "mem/cache/blk.hh"
#include "params/BaseTags.hh"
#include "sim/clocked_object.hh"
@@ -179,6 +180,38 @@ class BaseTags : public ClockedObject
* Print all tags used
*/
virtual std::string print() const = 0;
+
+ /**
+ * Find a block using the memory address
+ */
+ virtual CacheBlk * findBlock(Addr addr, bool is_secure) const = 0;
+
+ /**
+ * Calculate the block offset of an address.
+ * @param addr the address to get the offset of.
+ * @return the block offset.
+ */
+ int extractBlkOffset(Addr addr) const
+ {
+ return (addr & (Addr)(blkSize-1));
+ }
+
+ virtual void invalidate(CacheBlk *blk) = 0;
+
+ virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ int context_src) = 0;
+
+ virtual Addr extractTag(Addr addr) const = 0;
+
+ virtual void insertBlock(PacketPtr pkt, CacheBlk *blk) = 0;
+
+ virtual Addr regenerateBlkAddr(Addr tag, unsigned set) const = 0;
+
+ virtual CacheBlk* findVictim(Addr addr) = 0;
+
+ virtual int extractSet(Addr addr) const = 0;
+
+ virtual void forEachBlk(CacheBlkVisitor &visitor) = 0;
};
class BaseTagsCallback : public Callback
diff --git a/src/mem/cache/tags/base_set_assoc.cc b/src/mem/cache/tags/base_set_assoc.cc
index 3c8371edb..c5ef9cc4b 100644
--- a/src/mem/cache/tags/base_set_assoc.cc
+++ b/src/mem/cache/tags/base_set_assoc.cc
@@ -119,7 +119,7 @@ BaseSetAssoc::~BaseSetAssoc()
delete [] sets;
}
-BaseSetAssoc::BlkType*
+CacheBlk*
BaseSetAssoc::findBlock(Addr addr, bool is_secure) const
{
Addr tag = extractTag(addr);
diff --git a/src/mem/cache/tags/base_set_assoc.hh b/src/mem/cache/tags/base_set_assoc.hh
index 0107aafaf..79cfe756f 100644
--- a/src/mem/cache/tags/base_set_assoc.hh
+++ b/src/mem/cache/tags/base_set_assoc.hh
@@ -149,7 +149,7 @@ public:
* Invalidate the given block.
* @param blk The block to invalidate.
*/
- void invalidate(BlkType *blk)
+ void invalidate(CacheBlk *blk)
{
assert(blk);
assert(blk->isValid());
@@ -172,7 +172,7 @@ public:
* @param lat The access latency.
* @return Pointer to the cache block if found.
*/
- BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src)
{
Addr tag = extractTag(addr);
@@ -212,7 +212,7 @@ public:
* @param asid The address space ID.
* @return Pointer to the cache block if found.
*/
- BlkType* findBlock(Addr addr, bool is_secure) const;
+ CacheBlk* findBlock(Addr addr, bool is_secure) const;
/**
* Find an invalid block to evict for the address provided.
@@ -221,7 +221,7 @@ public:
* @param addr The addr to a find a replacement candidate for.
* @return The candidate block.
*/
- BlkType* findVictim(Addr addr) const
+ CacheBlk* findVictim(Addr addr)
{
BlkType *blk = NULL;
int set = extractSet(addr);
@@ -242,7 +242,7 @@ public:
* @param pkt Packet holding the address to update
* @param blk The block to update.
*/
- void insertBlock(PacketPtr pkt, BlkType *blk)
+ void insertBlock(PacketPtr pkt, CacheBlk *blk)
{
Addr addr = pkt->getAddr();
MasterID master_id = pkt->req->masterId();
@@ -312,16 +312,6 @@ public:
}
/**
- * Get the block offset from an address.
- * @param addr The address to get the offset of.
- * @return The block offset.
- */
- int extractBlkOffset(Addr addr) const
- {
- return (addr & blkMask);
- }
-
- /**
* Align an address to the block size.
* @param addr the address to align.
* @return The block address.
@@ -375,8 +365,7 @@ public:
*
* \param visitor Visitor to call on each block.
*/
- template <typename V>
- void forEachBlk(V &visitor) {
+ void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE {
for (unsigned i = 0; i < numSets * assoc; ++i) {
if (!visitor(blks[i]))
return;
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index ffe2cbf25..8d4f75382 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -161,13 +161,19 @@ FALRU::hashLookup(Addr addr) const
}
void
-FALRU::invalidate(FALRU::BlkType *blk)
+FALRU::invalidate(CacheBlk *blk)
{
assert(blk);
tagsInUse--;
}
-FALRUBlk*
+CacheBlk*
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src)
+{
+ return accessBlock(addr, is_secure, lat, context_src, 0);
+}
+
+CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
int *inCache)
{
@@ -206,7 +212,7 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
}
-FALRUBlk*
+CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
Addr blkAddr = blkAlign(addr);
@@ -220,7 +226,7 @@ FALRU::findBlock(Addr addr, bool is_secure) const
return blk;
}
-FALRUBlk*
+CacheBlk*
FALRU::findVictim(Addr addr)
{
FALRUBlk * blk = tail;
@@ -243,7 +249,7 @@ FALRU::findVictim(Addr addr)
}
void
-FALRU::insertBlock(PacketPtr pkt, FALRU::BlkType *blk)
+FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
{
}
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
index 07a31c154..fd183ab03 100644
--- a/src/mem/cache/tags/fa_lru.hh
+++ b/src/mem/cache/tags/fa_lru.hh
@@ -174,7 +174,7 @@ public:
* Invalidate a cache block.
* @param blk The block to invalidate.
*/
- void invalidate(BlkType *blk);
+ void invalidate(CacheBlk *blk);
/**
* Access block and update replacement data. May not succeed, in which case
@@ -188,8 +188,14 @@ public:
* @param inCache The FALRUBlk::inCache flags.
* @return Pointer to the cache block.
*/
- FALRUBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
- int context_src, int *inCache = 0);
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ int context_src, int *inCache);
+
+ /**
+ * Just a wrapper of above function to conform with the base interface.
+ */
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ int context_src);
/**
* Find the block in the cache, do not update the replacement data.
@@ -198,16 +204,16 @@ public:
* @param asid The address space ID.
* @return Pointer to the cache block.
*/
- FALRUBlk* findBlock(Addr addr, bool is_secure) const;
+ CacheBlk* findBlock(Addr addr, bool is_secure) const;
/**
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @return The block to place the replacement in.
*/
- FALRUBlk* findVictim(Addr addr);
+ CacheBlk* findVictim(Addr addr);
- void insertBlock(PacketPtr pkt, BlkType *blk);
+ void insertBlock(PacketPtr pkt, CacheBlk *blk);
/**
* Return the block size of this cache.
@@ -261,22 +267,12 @@ public:
}
/**
- * Calculate the block offset of an address.
- * @param addr the address to get the offset of.
- * @return the block offset.
- */
- int extractBlkOffset(Addr addr) const
- {
- return (addr & (Addr)(blkSize-1));
- }
-
- /**
* Regenerate the block address from the tag and the set.
* @param tag The tag of the block.
* @param set The set the block belongs to.
* @return the block address.
*/
- Addr regenerateBlkAddr(Addr tag, int set) const
+ Addr regenerateBlkAddr(Addr tag, unsigned set) const
{
return (tag);
}
@@ -304,8 +300,7 @@ public:
*
* \param visitor Visitor to call on each block.
*/
- template <typename V>
- void forEachBlk(V &visitor) {
+ void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE {
for (int i = 0; i < numBlocks; i++) {
if (!visitor(blks[i]))
return;
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc
index 290d1bdd0..f3a286602 100644
--- a/src/mem/cache/tags/lru.cc
+++ b/src/mem/cache/tags/lru.cc
@@ -54,10 +54,10 @@ LRU::LRU(const Params *p)
{
}
-BaseSetAssoc::BlkType*
+CacheBlk*
LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{
- BlkType *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
+ CacheBlk *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
if (blk != NULL) {
// move this block to head of the MRU list
@@ -70,8 +70,8 @@ LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
return blk;
}
-BaseSetAssoc::BlkType*
-LRU::findVictim(Addr addr) const
+CacheBlk*
+LRU::findVictim(Addr addr)
{
int set = extractSet(addr);
// grab a replacement candidate
@@ -95,7 +95,7 @@ LRU::insertBlock(PacketPtr pkt, BlkType *blk)
}
void
-LRU::invalidate(BlkType *blk)
+LRU::invalidate(CacheBlk *blk)
{
BaseSetAssoc::invalidate(blk);
diff --git a/src/mem/cache/tags/lru.hh b/src/mem/cache/tags/lru.hh
index a034dd6ab..df5a8e39d 100644
--- a/src/mem/cache/tags/lru.hh
+++ b/src/mem/cache/tags/lru.hh
@@ -69,11 +69,11 @@ class LRU : public BaseSetAssoc
*/
~LRU() {}
- BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src);
- BlkType* findVictim(Addr addr) const;
+ CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk);
- void invalidate(BlkType *blk);
+ void invalidate(CacheBlk *blk);
};
#endif // __MEM_CACHE_TAGS_LRU_HH__
diff --git a/src/mem/cache/tags/random_repl.cc b/src/mem/cache/tags/random_repl.cc
index 77b379135..e7422a335 100644
--- a/src/mem/cache/tags/random_repl.cc
+++ b/src/mem/cache/tags/random_repl.cc
@@ -44,16 +44,16 @@ RandomRepl::RandomRepl(const Params *p)
{
}
-BaseSetAssoc::BlkType*
+CacheBlk*
RandomRepl::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{
return BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
}
-BaseSetAssoc::BlkType*
-RandomRepl::findVictim(Addr addr) const
+CacheBlk*
+RandomRepl::findVictim(Addr addr)
{
- BlkType *blk = BaseSetAssoc::findVictim(addr);
+ CacheBlk *blk = BaseSetAssoc::findVictim(addr);
// if all blocks are valid, pick a replacement at random
if (blk->isValid()) {
@@ -77,7 +77,7 @@ RandomRepl::insertBlock(PacketPtr pkt, BlkType *blk)
}
void
-RandomRepl::invalidate(BlkType *blk)
+RandomRepl::invalidate(CacheBlk *blk)
{
BaseSetAssoc::invalidate(blk);
}
diff --git a/src/mem/cache/tags/random_repl.hh b/src/mem/cache/tags/random_repl.hh
index 7f2795379..642c76777 100644
--- a/src/mem/cache/tags/random_repl.hh
+++ b/src/mem/cache/tags/random_repl.hh
@@ -58,11 +58,11 @@ class RandomRepl : public BaseSetAssoc
*/
~RandomRepl() {}
- BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src);
- BlkType* findVictim(Addr addr) const;
+ CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk);
- void invalidate(BlkType *blk);
+ void invalidate(CacheBlk *blk);
};
#endif // __MEM_CACHE_TAGS_RANDOM_REPL_HH__