author     Nikos Nikoleris <nikos.nikoleris@arm.com>   2018-04-12 16:57:21 +0100
committer  Nikos Nikoleris <nikos.nikoleris@arm.com>   2018-04-18 13:37:48 +0000
commit     5cebd91336cbfbf46d6bffe9a01b7eea55e62f44 (patch)
tree       00928622e5a4b07cd03536c550946adbef9ab642
parent     2ecc1756254ba571318ffe30130bad35a99a2d39 (diff)
download   gem5-5cebd91336cbfbf46d6bffe9a01b7eea55e62f44.tar.xz
mem-cache: Revamp multiple size tracking for FALRU caches
This change fixes a few bugs and refactors the mechanism by which
caches that use the FALRU tags can output statistics for multiple
cache sizes, ranging from a minimum cache size of interest up to the
actual configured cache size.

Change-Id: Ibea029cf275a8c068c26eceeb06c761fc53aede2
Reviewed-on: https://gem5-review.googlesource.com/9826
Reviewed-by: Daniel Carvalho <odanrc@yahoo.com.br>
Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com>
-rw-r--r--  src/mem/cache/tags/Tags.py   |   3
-rw-r--r--  src/mem/cache/tags/fa_lru.cc | 338
-rw-r--r--  src/mem/cache/tags/fa_lru.hh | 188
3 files changed, 337 insertions, 192 deletions
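
The mechanism being revamped relies on the inclusion property of a fully
associative LRU stack: a block that hits in a simulated cache of N blocks
also hits in any larger cache managed by the same LRU order. The sketch
below is a minimal, self-contained illustration of that idea and is not
gem5 code; the CacheTracking class introduced in this patch keeps one
boundary pointer per tracked size so each access is handled in O(1),
whereas the sketch simply recomputes the stack distance on every access
for readability. The block size, cache sizes and access pattern are made
up for the example.

// Minimal sketch (not gem5 code): counting hits and misses for several
// nested cache sizes with a single LRU stack.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <list>
#include <unordered_map>
#include <vector>

int main()
{
    const unsigned blkSize = 64;
    const unsigned minSize = 128 * 1024;        // smallest tracked cache
    const unsigned maxSize = 1024 * 1024;       // the configured cache
    std::vector<unsigned> capacities;           // capacities in blocks
    for (unsigned s = minSize; s <= maxSize; s <<= 1)
        capacities.push_back(s / blkSize);

    std::list<uint64_t> lru;                    // MRU at front, LRU at back
    std::unordered_map<uint64_t, std::list<uint64_t>::iterator> where;
    std::vector<uint64_t> hits(capacities.size(), 0);
    std::vector<uint64_t> misses(capacities.size(), 0);

    auto access = [&](uint64_t addr) {
        const uint64_t blk = addr / blkSize;
        unsigned dist = ~0U;                    // "infinite" distance on a miss
        auto it = where.find(blk);
        if (it != where.end()) {
            dist = std::distance(lru.begin(), it->second);
            lru.erase(it->second);
        }
        // LRU inclusion property: a block at stack distance d hits in
        // every cache that holds more than d blocks.
        for (size_t i = 0; i < capacities.size(); i++)
            (dist < capacities[i] ? hits[i] : misses[i])++;
        lru.push_front(blk);
        where[blk] = lru.begin();
        if (lru.size() > capacities.back()) {   // evict beyond the real cache
            where.erase(lru.back());
            lru.pop_back();
        }
    };

    // Toy access pattern with a 256kB working set
    for (uint64_t a = 0; a < 4ULL * maxSize; a += blkSize)
        access(a % (2 * minSize));

    for (size_t i = 0; i < capacities.size(); i++)
        std::cout << (minSize << i) / 1024 << "kB: " << hits[i]
                  << " hits, " << misses[i] << " misses\n";
    return 0;
}
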
diff --git a/src/mem/cache/tags/Tags.py b/src/mem/cache/tags/Tags.py
index ff4eff44e..76cd8d6b9 100644
--- a/src/mem/cache/tags/Tags.py
+++ b/src/mem/cache/tags/Tags.py
@@ -77,3 +77,6 @@ class FALRU(BaseTags):
type = 'FALRU'
cxx_class = 'FALRU'
cxx_header = "mem/cache/tags/fa_lru.hh"
+
+ min_tracked_cache_size = Param.MemorySize("128kB", "Minimum cache size for"
+ " which we track statistics")
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index b75497ea2..49c0d340a 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013,2016-2017 ARM Limited
+ * Copyright (c) 2013,2016-2018 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@@ -38,6 +38,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
+ * Nikos Nikoleris
*/
/**
@@ -54,7 +55,9 @@
#include "base/logging.hh"
FALRU::FALRU(const Params *p)
- : BaseTags(p), cacheBoundaries(nullptr)
+ : BaseTags(p),
+
+ cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
if (!isPowerOf2(blkSize))
fatal("cache block size (in bytes) `%d' must be a power of two",
@@ -62,40 +65,16 @@ FALRU::FALRU(const Params *p)
if (!isPowerOf2(size))
fatal("Cache Size must be power of 2 for now");
- // Track all cache sizes from 128K up by powers of 2
- numCaches = floorLog2(size) - 17;
- if (numCaches > 0){
- cacheBoundaries = new FALRUBlk *[numCaches];
- cacheMask = (ULL(1) << numCaches) - 1;
- } else {
- cacheMask = 0;
- }
-
blks = new FALRUBlk[numBlocks];
- head = &(blks[0]);
- tail = &(blks[numBlocks-1]);
+ head = &(blks[0]);
head->prev = nullptr;
head->next = &(blks[1]);
- head->inCache = cacheMask;
+ head->set = 0;
+ head->way = 0;
head->data = &dataBlks[0];
- tail->prev = &(blks[numBlocks-2]);
- tail->next = nullptr;
- tail->inCache = 0;
- tail->data = &dataBlks[(numBlocks-1)*blkSize];
-
- unsigned index = (1 << 17) / blkSize;
- unsigned j = 0;
- int flags = cacheMask;
for (unsigned i = 1; i < numBlocks - 1; i++) {
- blks[i].inCache = flags;
- if (i == index - 1){
- cacheBoundaries[j] = &(blks[i]);
- flags &= ~ (1<<j);
- ++j;
- index = index << 1;
- }
blks[i].prev = &(blks[i-1]);
blks[i].next = &(blks[i+1]);
blks[i].set = 0;
@@ -104,16 +83,19 @@ FALRU::FALRU(const Params *p)
// Associate a data chunk to the block
blks[i].data = &dataBlks[blkSize*i];
}
- assert(j == numCaches);
- assert(index == numBlocks);
- //assert(check());
+
+ tail = &(blks[numBlocks - 1]);
+ tail->prev = &(blks[numBlocks - 2]);
+ tail->next = nullptr;
+ tail->set = 0;
+ tail->way = numBlocks - 1;
+ tail->data = &dataBlks[(numBlocks - 1) * blkSize];
+
+ cacheTracking.init(head, tail);
}
FALRU::~FALRU()
{
- if (numCaches)
- delete[] cacheBoundaries;
-
delete[] blks;
}
@@ -121,34 +103,7 @@ void
FALRU::regStats()
{
BaseTags::regStats();
- hits
- .init(numCaches+1)
- .name(name() + ".falru_hits")
- .desc("The number of hits in each cache size.")
- ;
- misses
- .init(numCaches+1)
- .name(name() + ".falru_misses")
- .desc("The number of misses in each cache size.")
- ;
- accesses
- .name(name() + ".falru_accesses")
- .desc("The number of accesses to the FA LRU cache.")
- ;
-
- for (unsigned i = 0; i <= numCaches; ++i) {
- std::stringstream size_str;
- if (i < 3){
- size_str << (1<<(i+7)) <<"K";
- } else {
- size_str << (1<<(i-3)) <<"M";
- }
-
- hits.subname(i, size_str.str());
- hits.subdesc(i, "Hits in a " + size_str.str() +" cache");
- misses.subname(i, size_str.str());
- misses.subdesc(i, "Misses in a " + size_str.str() +" cache");
- }
+ cacheTracking.regStats(name());
}
FALRUBlk *
@@ -180,10 +135,10 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
}
CacheBlk*
-FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int *inCache)
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CachesMask *in_caches_mask)
{
- accesses++;
- int tmp_in_cache = 0;
+ CachesMask mask = 0;
Addr blkAddr = blkAlign(addr);
FALRUBlk* blk = hashLookup(blkAddr);
@@ -199,31 +154,19 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int *inCache)
accessLatency;
}
assert(blk->tag == blkAddr);
- tmp_in_cache = blk->inCache;
- for (unsigned i = 0; i < numCaches; i++) {
- if (1<<i & blk->inCache) {
- hits[i]++;
- } else {
- misses[i]++;
- }
- }
- hits[numCaches]++;
- if (blk != head){
- moveToHead(blk);
- }
+ mask = blk->inCachesMask;
+ moveToHead(blk);
} else {
// If a cache miss
lat = lookupLatency;
blk = nullptr;
- for (unsigned i = 0; i <= numCaches; ++i) {
- misses[i]++;
- }
}
- if (inCache) {
- *inCache = tmp_in_cache;
+ if (in_caches_mask) {
+ *in_caches_mask = mask;
}
- //assert(check());
+ cacheTracking.recordAccess(blk);
+
return blk;
}
@@ -261,7 +204,7 @@ FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);
// Make sure block is not present in the cache
- assert(falruBlk->inCache == 0);
+ assert(falruBlk->inCachesMask == 0);
// Do common block insertion functionality
BaseTags::insertBlock(pkt, blk);
@@ -271,8 +214,6 @@ FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
// Insert new block in the hash table
tagHash[falruBlk->tag] = falruBlk;
-
- //assert(check());
}
void
@@ -280,27 +221,7 @@ FALRU::moveToHead(FALRUBlk *blk)
{
// If block is not already head, do the moving
if (blk != head) {
- // Get all caches that this block does not reside in
- int updateMask = blk->inCache ^ cacheMask;
-
- // Update boundaries for all cache sizes
- for (unsigned i = 0; i < numCaches; i++){
- // If block has been moved to a place before this boundary,
- // all blocks in it will be pushed towards the LRU position,
- // making one leave the boundary
- if ((1U<<i) & updateMask) {
- cacheBoundaries[i]->inCache &= ~(1U<<i);
- cacheBoundaries[i] = cacheBoundaries[i]->prev;
- // If the block resides exactly at this boundary, the previous
- // block is pushed to its position
- } else if (cacheBoundaries[i] == blk) {
- cacheBoundaries[i] = blk->prev;
- }
- }
-
- // Make block reside in all caches
- blk->inCache = cacheMask;
-
+ cacheTracking.moveBlockToHead(blk);
// If block is tail, set previous block as new tail
if (blk == tail){
assert(blk->next == nullptr);
@@ -317,6 +238,8 @@ FALRU::moveToHead(FALRUBlk *blk)
blk->prev = nullptr;
head->prev = blk;
head = blk;
+
+ cacheTracking.check(head, tail);
}
}
@@ -325,26 +248,7 @@ FALRU::moveToTail(FALRUBlk *blk)
{
// If block is not already tail, do the moving
if (blk != tail) {
- // Update boundaries for all cache sizes
- for (unsigned i = 0; i < numCaches; i++){
- // If block has been moved to a place after this boundary,
- // all blocks in it will be pushed towards the MRU position,
- // making one enter the boundary
- if ((1U<<i) & blk->inCache) {
- // If the first block after the boundary is the block,
- // get its successor
- if (cacheBoundaries[i]->next == blk){
- cacheBoundaries[i] = cacheBoundaries[i]->next->next;
- } else {
- cacheBoundaries[i] = cacheBoundaries[i]->next;
- }
- cacheBoundaries[i]->inCache |= (1U<<i);
- }
- }
-
- // Make block reside in the last cache only
- blk->inCache = 0;
-
+ cacheTracking.moveBlockToTail(blk);
// If block is head, set next block as new head
if (blk == head){
assert(blk->prev == nullptr);
@@ -361,38 +265,180 @@ FALRU::moveToTail(FALRUBlk *blk)
blk->next = nullptr;
tail->next = blk;
tail = blk;
+
+ cacheTracking.check(head, tail);
}
}
-bool
-FALRU::check()
+FALRU *
+FALRUParams::create()
{
+ return new FALRU(this);
+}
+
+void
+FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
+{
+#ifdef FALRU_DEBUG
FALRUBlk* blk = head;
- int tot_size = 0;
- int boundary = 1<<17;
+ unsigned curr_size = 0;
+ unsigned tracked_cache_size = minTrackedSize;
+ CachesMask in_caches_mask = inAllCachesMask;
int j = 0;
- int flags = cacheMask;
+
while (blk) {
- tot_size += blkSize;
- if (blk->inCache != flags) {
- return false;
+ panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
+ "%x found %x", blk->inCachesMask, in_caches_mask);
+
+ curr_size += blkSize;
+ if (curr_size == tracked_cache_size && blk != tail) {
+ panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
+ "cache", j);
+ tracked_cache_size <<= 1;
+ // from this point, blocks fit only in the larger caches
+ in_caches_mask &= ~(1U << j);
+ ++j;
}
- if (tot_size == boundary && blk != tail) {
- if (cacheBoundaries[j] != blk) {
- return false;
- }
- flags &=~(1 << j);
- boundary = boundary<<1;
+ blk = blk->next;
+ }
+#endif // FALRU_DEBUG
+}
+
+void
+FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
+{
+ // early exit if we are not tracking any extra caches
+ FALRUBlk* blk = numTrackedCaches ? head : nullptr;
+ unsigned curr_size = 0;
+ unsigned tracked_cache_size = minTrackedSize;
+ CachesMask in_caches_mask = inAllCachesMask;
+ int j = 0;
+
+ while (blk) {
+ blk->inCachesMask = in_caches_mask;
+
+ curr_size += blkSize;
+ if (curr_size == tracked_cache_size && blk != tail) {
+ boundaries[j] = blk;
+
+ tracked_cache_size <<= 1;
+ // from this point, blocks fit only in the larger caches
+ in_caches_mask &= ~(1U << j);
++j;
}
blk = blk->next;
}
- return true;
}
-FALRU *
-FALRUParams::create()
+
+void
+FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
- return new FALRU(this);
+ // Get the mask of all caches, in which the block didn't fit
+ // before moving it to the head
+ CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;
+
+ for (int i = 0; i < numTrackedCaches; i++) {
+ CachesMask current_cache_mask = 1U << i;
+ if (current_cache_mask & update_caches_mask) {
+ // if the ith cache didn't fit the block (before it is moved to
+ // the head), move the ith boundary 1 block closer to the
+ // MRU
+ boundaries[i]->inCachesMask &= ~current_cache_mask;
+ boundaries[i] = boundaries[i]->prev;
+ } else if (boundaries[i] == blk) {
+ // Make sure the boundary doesn't point to the block
+ // we are about to move
+ boundaries[i] = blk->prev;
+ }
+ }
+
+ // Make block reside in all caches
+ blk->inCachesMask = inAllCachesMask;
+}
+
+void
+FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
+{
+ CachesMask update_caches_mask = blk->inCachesMask;
+
+ for (int i = 0; i < numTrackedCaches; i++) {
+ CachesMask current_cache_mask = 1U << i;
+ if (current_cache_mask & update_caches_mask) {
+ // if the ith cache fitted the block (before it is moved to
+ // the tail), move the ith boundary 1 block closer to the
+ // LRU
+ boundaries[i] = boundaries[i]->next;
+ if (boundaries[i] == blk) {
+ // Make sure the boundary doesn't point to the block
+ // we are about to move
+ boundaries[i] = blk->next;
+ }
+ boundaries[i]->inCachesMask |= current_cache_mask;
+ }
+ }
+
+ // The block now fits only in the actual cache
+ blk->inCachesMask = 0;
+}
+
+void
+FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
+{
+ for (int i = 0; i < numTrackedCaches; i++) {
+ if (blk && ((1U << i) & blk->inCachesMask)) {
+ hits[i]++;
+ } else {
+ misses[i]++;
+ }
+ }
+
+ // Record stats for the actual cache too
+ if (blk) {
+ hits[numTrackedCaches]++;
+ } else {
+ misses[numTrackedCaches]++;
+ }
+
+ accesses++;
}
+void
+printSize(std::ostream &stream, size_t size)
+{
+ static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
+ int div = 0;
+ // Scale down to the largest unit we have a suffix for, without
+ // indexing past the end of SIZES
+ while (size >= 1024 && div < int(sizeof(SIZES) / sizeof(SIZES[0])) - 1) {
+ div++;
+ size >>= 10;
+ }
+ stream << size << SIZES[div];
+}
+
+void
+FALRU::CacheTracking::regStats(std::string name)
+{
+ hits
+ .init(numTrackedCaches + 1)
+ .name(name + ".falru_hits")
+ .desc("The number of hits in each cache size.")
+ ;
+ misses
+ .init(numTrackedCaches + 1)
+ .name(name + ".falru_misses")
+ .desc("The number of misses in each cache size.")
+ ;
+ accesses
+ .name(name + ".falru_accesses")
+ .desc("The number of accesses to the FA LRU cache.")
+ ;
+
+ for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
+ std::stringstream size_str;
+ printSize(size_str, minTrackedSize << i);
+ hits.subname(i, size_str.str());
+ hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
+ misses.subname(i, size_str.str());
+ misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
+ }
+}
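
As a cross-check on the constructor and regStats() code above, the
following standalone snippet (not gem5 code) reproduces how the number of
tracked caches, the all-caches mask, and the per-size stat subnames fall
out of min_tracked_cache_size and the configured cache size, assuming
both are powers of two. The 2MB cache size is just an example value.

// Standalone sketch (not gem5 code): the CacheTracking sizing arithmetic
// for an example 2MB FALRU cache with a 128kB minimum tracked size.
#include <cstdint>
#include <iostream>

static int floorLog2(uint64_t x)
{
    int n = -1;
    while (x) { x >>= 1; n++; }
    return n;
}

int main()
{
    const uint64_t minTracked = 128 * 1024;       // min_tracked_cache_size
    const uint64_t cacheSize = 2 * 1024 * 1024;   // configured FALRU size

    // One tracked cache per power of two strictly below the real size
    const int numTrackedCaches = cacheSize > minTracked ?
        floorLog2(cacheSize) - floorLog2(minTracked) : 0;
    const uint32_t inAllCachesMask = (1U << numTrackedCaches) - 1;

    std::cout << "numTrackedCaches = " << numTrackedCaches
              << ", inAllCachesMask = 0x" << std::hex << inAllCachesMask
              << std::dec << "\n";

    // regStats() registers one entry per tracked size plus the real cache
    for (int i = 0; i <= numTrackedCaches; i++)
        std::cout << "  stat index " << i << " -> "
                  << (minTracked << i) / 1024 << "kB cache\n";
    return 0;
}
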
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
index 73d66604f..bec98e3a7 100644
--- a/src/mem/cache/tags/fa_lru.hh
+++ b/src/mem/cache/tags/fa_lru.hh
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2013,2016 ARM Limited
+ * Copyright (c) 2012-2013,2016,2018 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@@ -38,6 +38,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
+ * Nikos Nikoleris
*/
/**
@@ -51,12 +52,23 @@
#include <list>
#include <unordered_map>
+#include "base/intmath.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base.hh"
#include "mem/packet.hh"
#include "params/FALRU.hh"
+// Uncomment to enable sanity checks for the FALRU cache and the
+// CacheTracking class
+//#define FALRU_DEBUG
+
+// A bitmask of the caches we are keeping track of. The lowest bit
+// corresponds to the smallest tracked cache, whose size is given by
+// the min_tracked_cache_size parameter. Each subsequent bit
+// corresponds to a cache twice the size of the previous one.
+typedef uint32_t CachesMask;
+
/**
* A fully associative cache block.
*/
@@ -68,15 +80,8 @@ class FALRUBlk : public CacheBlk
/** The next block in LRU order. */
FALRUBlk *next;
- /**
- * A bit mask of the sizes of cache that this block is resident in.
- * Each bit represents a power of 2 in MB size cache.
- * If bit 0 is set, this block is in a 1MB cache
- * If bit 2 is set, this block is in a 4MB cache, etc.
- * There is one bit for each cache smaller than the full size (default
- * 16MB).
- */
- int inCache;
+ /** A bit mask of the caches that fit this block. */
+ CachesMask inCachesMask;
};
/**
@@ -90,13 +95,6 @@ class FALRU : public BaseTags
typedef FALRUBlk BlkType;
protected:
- /** Array of pointers to blocks at the cache size boundaries. */
- FALRUBlk **cacheBoundaries;
- /** A mask for the FALRUBlk::inCache bits. */
- int cacheMask;
- /** The number of different size caches being tracked. */
- unsigned numCaches;
-
/** The cache blocks. */
FALRUBlk *blks;
@@ -134,31 +132,6 @@ class FALRU : public BaseTags
*/
void moveToTail(FALRUBlk *blk);
- /**
- * Check to make sure all the cache boundaries are still where they should
- * be. Used for debugging.
- * @return True if everything is correct.
- */
- bool check();
-
- /**
- * @defgroup FALRUStats Fully Associative LRU specific statistics
- * The FA lru stack lets us track multiple cache sizes at once. These
- * statistics track the hits and misses for different cache sizes.
- * @{
- */
-
- /** Hits in each cache size >= 128K. */
- Stats::Vector hits;
- /** Misses in each cache size >= 128K. */
- Stats::Vector misses;
- /** Total number of accesses. */
- Stats::Scalar accesses;
-
- /**
- * @}
- */
-
public:
typedef FALRUParams Params;
@@ -170,7 +143,6 @@ class FALRU : public BaseTags
/**
* Register the stats for this object.
- * @param name The name to prepend to the stats name.
*/
void regStats() override;
@@ -184,15 +156,15 @@ class FALRU : public BaseTags
* Access block and update replacement data. May not succeed, in which
* case nullptr pointer is returned. This has all the implications of a
* cache access and should only be used as such.
- * Returns the access latency and inCache flags as a side effect.
+ * Returns the access latency and inCachesMask flags as a side effect.
* @param addr The address to look for.
* @param is_secure True if the target memory space is secure.
* @param lat The latency of the access.
- * @param inCache The FALRUBlk::inCache flags.
+ * @param in_cache_mask Mask indicating the caches in which the blk fits.
* @return Pointer to the cache block.
*/
CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
- int *inCache);
+ CachesMask *in_cache_mask);
/**
* Just a wrapper of above function to conform with the base interface.
@@ -287,6 +259,130 @@ class FALRU : public BaseTags
return;
}
}
+
+ private:
+ /**
+ * Mechanism that allows us to simultaneously collect hit and
+ * miss statistics for multiple caches. Currently, we keep track of
+ * caches from a set minimum size of interest up to the actual
+ * cache size.
+ */
+ class CacheTracking
+ {
+ public:
+ CacheTracking(unsigned min_size, unsigned max_size,
+ unsigned block_size)
+ : blkSize(block_size),
+ minTrackedSize(min_size),
+ numTrackedCaches(max_size > min_size ?
+ floorLog2(max_size) - floorLog2(min_size) : 0),
+ inAllCachesMask(mask(numTrackedCaches)),
+ boundaries(new FALRUBlk *[numTrackedCaches])
+ {
+ fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
+ "Not enough bits (%s) in type CachesMask type to keep "
+ "track of %d caches\n", sizeof(CachesMask),
+ numTrackedCaches);
+ }
+
+ ~CacheTracking()
+ {
+ delete[] boundaries;
+ }
+
+ /**
+ * Initialize cache blocks and the tracking mechanism
+ *
+ * All blocks in the cache need to be initialized once.
+ *
+ * @param head the MRU block
+ * @param tail the LRU block
+ */
+ void init(FALRUBlk *head, FALRUBlk *tail);
+
+ /**
+ * Update boundaries as a block will be moved to the MRU.
+ *
+ * For all caches that didn't fit the block before moving it,
+ * we move their boundaries one block closer to the MRU. We
+ * also update the blocks' inCachesMask as necessary.
+ *
+ * @param blk the block that will be moved to the head
+ */
+ void moveBlockToHead(FALRUBlk *blk);
+
+ /**
+ * Update boundaries as a block will be moved to the LRU.
+ *
+ * For all caches that fitted the block before moving it, we
+ * move their boundaries one block closer to the LRU. We
+ * also update the blocks' inCachesMask as necessary.
+ *
+ * @param blk the block that will be moved to the tail
+ */
+ void moveBlockToTail(FALRUBlk *blk);
+
+ /**
+ * Notify of a block access.
+ *
+ * This should be called every time a block is accessed and it
+ * updates statistics. If the input block is nullptr then we
+ * treat the access as a miss. The block's InCacheMask
+ * determines the caches in which the block fits.
+ *
+ * @param blk the block to record the access for
+ */
+ void recordAccess(FALRUBlk *blk);
+
+ /**
+ * Check that the tracking mechanism is in consistent state.
+ *
+ * Iterate from the head (MRU) to the tail (LRU) of the list
+ * of blocks and assert the inCachesMask and the boundaries
+ * are in consistent state.
+ *
+ * @param head the MRU block of the actual cache
+ * @param tail the LRU block of the actual cache
+ */
+ void check(FALRUBlk *head, FALRUBlk *tail);
+
+ /**
+ * Register the stats for this object.
+ */
+ void regStats(std::string name);
+
+ private:
+ /** The size of the cache block */
+ const unsigned blkSize;
+ /** The smallest cache we are tracking */
+ const unsigned minTrackedSize;
+ /** The number of different size caches being tracked. */
+ const int numTrackedCaches;
+ /** A mask for all caches being tracked. */
+ const CachesMask inAllCachesMask;
+ /** Array of pointers to blocks at the cache boundaries. */
+ FALRUBlk** boundaries;
+
+ protected:
+ /**
+ * @defgroup FALRUStats Fully Associative LRU specific statistics
+ * The FA lru stack lets us track multiple cache sizes at once. These
+ * statistics track the hits and misses for different cache sizes.
+ * @{
+ */
+
+ /** Hits in each cache */
+ Stats::Vector hits;
+ /** Misses in each cache */
+ Stats::Vector misses;
+ /** Total number of accesses */
+ Stats::Scalar accesses;
+
+ /**
+ * @}
+ */
+ };
+ CacheTracking cacheTracking;
};
#endif // __MEM_CACHE_TAGS_FA_LRU_HH__
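
Finally, a small self-contained sketch (not gem5 code) of how the
CachesMask returned through accessBlock()'s out-parameter can be
interpreted: bit i set means the access would also have hit in the i-th
tracked cache, i.e. a cache of min_tracked_cache_size << i, while a zero
mask means the block currently fits only in the actual, full-size cache.
The decodeMask helper and the example values are hypothetical.

// Hypothetical helper (not part of gem5): decode a CachesMask as filled
// in by FALRU::accessBlock() via its in_cache_mask out-parameter.
#include <cstdint>
#include <iostream>

typedef uint32_t CachesMask;

void
decodeMask(CachesMask mask, uint64_t min_tracked_size, int num_tracked)
{
    for (int i = 0; i < num_tracked; i++) {
        std::cout << (min_tracked_size << i) / 1024 << "kB: "
                  << ((mask & (1U << i)) ? "would hit" : "would miss")
                  << "\n";
    }
}

int main()
{
    // Example: four tracked caches (128kB..1MB inside a larger FALRU
    // cache); the block sits between the 256kB and 512kB boundaries,
    // so it fits in the 512kB and 1MB tracked caches only.
    decodeMask(0xc, 128 * 1024, 4);
    return 0;
}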