summaryrefslogtreecommitdiff
path: root/src/mem/cache/tags/fa_lru.cc
diff options
context:
space:
mode:
Diffstat (limited to 'src/mem/cache/tags/fa_lru.cc')
-rw-r--r--src/mem/cache/tags/fa_lru.cc338
1 files changed, 192 insertions, 146 deletions
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index b75497ea2..49c0d340a 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013,2016-2017 ARM Limited
+ * Copyright (c) 2013,2016-2018 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@@ -38,6 +38,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
+ * Nikos Nikoleris
*/
/**
@@ -54,7 +55,9 @@
#include "base/logging.hh"
FALRU::FALRU(const Params *p)
- : BaseTags(p), cacheBoundaries(nullptr)
+ : BaseTags(p),
+
+ cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
if (!isPowerOf2(blkSize))
fatal("cache block size (in bytes) `%d' must be a power of two",
@@ -62,40 +65,16 @@ FALRU::FALRU(const Params *p)
if (!isPowerOf2(size))
fatal("Cache Size must be power of 2 for now");
- // Track all cache sizes from 128K up by powers of 2
- numCaches = floorLog2(size) - 17;
- if (numCaches > 0){
- cacheBoundaries = new FALRUBlk *[numCaches];
- cacheMask = (ULL(1) << numCaches) - 1;
- } else {
- cacheMask = 0;
- }
-
blks = new FALRUBlk[numBlocks];
- head = &(blks[0]);
- tail = &(blks[numBlocks-1]);
+ head = &(blks[0]);
head->prev = nullptr;
head->next = &(blks[1]);
- head->inCache = cacheMask;
+ head->set = 0;
+ head->way = 0;
head->data = &dataBlks[0];
- tail->prev = &(blks[numBlocks-2]);
- tail->next = nullptr;
- tail->inCache = 0;
- tail->data = &dataBlks[(numBlocks-1)*blkSize];
-
- unsigned index = (1 << 17) / blkSize;
- unsigned j = 0;
- int flags = cacheMask;
for (unsigned i = 1; i < numBlocks - 1; i++) {
- blks[i].inCache = flags;
- if (i == index - 1){
- cacheBoundaries[j] = &(blks[i]);
- flags &= ~ (1<<j);
- ++j;
- index = index << 1;
- }
blks[i].prev = &(blks[i-1]);
blks[i].next = &(blks[i+1]);
blks[i].set = 0;
@@ -104,16 +83,19 @@ FALRU::FALRU(const Params *p)
// Associate a data chunk to the block
blks[i].data = &dataBlks[blkSize*i];
}
- assert(j == numCaches);
- assert(index == numBlocks);
- //assert(check());
+
+ tail = &(blks[numBlocks - 1]);
+ tail->prev = &(blks[numBlocks - 2]);
+ tail->next = nullptr;
+ tail->set = 0;
+ tail->way = numBlocks - 1;
+ tail->data = &dataBlks[(numBlocks - 1) * blkSize];
+
+ cacheTracking.init(head, tail);
}
FALRU::~FALRU()
{
- if (numCaches)
- delete[] cacheBoundaries;
-
delete[] blks;
}
@@ -121,34 +103,7 @@ void
FALRU::regStats()
{
BaseTags::regStats();
- hits
- .init(numCaches+1)
- .name(name() + ".falru_hits")
- .desc("The number of hits in each cache size.")
- ;
- misses
- .init(numCaches+1)
- .name(name() + ".falru_misses")
- .desc("The number of misses in each cache size.")
- ;
- accesses
- .name(name() + ".falru_accesses")
- .desc("The number of accesses to the FA LRU cache.")
- ;
-
- for (unsigned i = 0; i <= numCaches; ++i) {
- std::stringstream size_str;
- if (i < 3){
- size_str << (1<<(i+7)) <<"K";
- } else {
- size_str << (1<<(i-3)) <<"M";
- }
-
- hits.subname(i, size_str.str());
- hits.subdesc(i, "Hits in a " + size_str.str() +" cache");
- misses.subname(i, size_str.str());
- misses.subdesc(i, "Misses in a " + size_str.str() +" cache");
- }
+ cacheTracking.regStats(name());
}
FALRUBlk *
@@ -180,10 +135,10 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
}
CacheBlk*
-FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int *inCache)
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CachesMask *in_caches_mask)
{
- accesses++;
- int tmp_in_cache = 0;
+ CachesMask mask = 0;
Addr blkAddr = blkAlign(addr);
FALRUBlk* blk = hashLookup(blkAddr);
@@ -199,31 +154,19 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int *inCache)
accessLatency;
}
assert(blk->tag == blkAddr);
- tmp_in_cache = blk->inCache;
- for (unsigned i = 0; i < numCaches; i++) {
- if (1<<i & blk->inCache) {
- hits[i]++;
- } else {
- misses[i]++;
- }
- }
- hits[numCaches]++;
- if (blk != head){
- moveToHead(blk);
- }
+ mask = blk->inCachesMask;
+ moveToHead(blk);
} else {
// If a cache miss
lat = lookupLatency;
blk = nullptr;
- for (unsigned i = 0; i <= numCaches; ++i) {
- misses[i]++;
- }
}
- if (inCache) {
- *inCache = tmp_in_cache;
+ if (in_caches_mask) {
+ *in_caches_mask = mask;
}
- //assert(check());
+ cacheTracking.recordAccess(blk);
+
return blk;
}
@@ -261,7 +204,7 @@ FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);
// Make sure block is not present in the cache
- assert(falruBlk->inCache == 0);
+ assert(falruBlk->inCachesMask == 0);
// Do common block insertion functionality
BaseTags::insertBlock(pkt, blk);
@@ -271,8 +214,6 @@ FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
// Insert new block in the hash table
tagHash[falruBlk->tag] = falruBlk;
-
- //assert(check());
}
void
@@ -280,27 +221,7 @@ FALRU::moveToHead(FALRUBlk *blk)
{
// If block is not already head, do the moving
if (blk != head) {
- // Get all caches that this block does not reside in
- int updateMask = blk->inCache ^ cacheMask;
-
- // Update boundaries for all cache sizes
- for (unsigned i = 0; i < numCaches; i++){
- // If block has been moved to a place before this boundary,
- // all blocks in it will be pushed towards the LRU position,
- // making one leave the boundary
- if ((1U<<i) & updateMask) {
- cacheBoundaries[i]->inCache &= ~(1U<<i);
- cacheBoundaries[i] = cacheBoundaries[i]->prev;
- // If the block resides exactly at this boundary, the previous
- // block is pushed to its position
- } else if (cacheBoundaries[i] == blk) {
- cacheBoundaries[i] = blk->prev;
- }
- }
-
- // Make block reside in all caches
- blk->inCache = cacheMask;
-
+ cacheTracking.moveBlockToHead(blk);
// If block is tail, set previous block as new tail
if (blk == tail){
assert(blk->next == nullptr);
@@ -317,6 +238,8 @@ FALRU::moveToHead(FALRUBlk *blk)
blk->prev = nullptr;
head->prev = blk;
head = blk;
+
+ cacheTracking.check(head, tail);
}
}
@@ -325,26 +248,7 @@ FALRU::moveToTail(FALRUBlk *blk)
{
// If block is not already tail, do the moving
if (blk != tail) {
- // Update boundaries for all cache sizes
- for (unsigned i = 0; i < numCaches; i++){
- // If block has been moved to a place after this boundary,
- // all blocks in it will be pushed towards the MRU position,
- // making one enter the boundary
- if ((1U<<i) & blk->inCache) {
- // If the first block after the boundary is the block,
- // get its successor
- if (cacheBoundaries[i]->next == blk){
- cacheBoundaries[i] = cacheBoundaries[i]->next->next;
- } else {
- cacheBoundaries[i] = cacheBoundaries[i]->next;
- }
- cacheBoundaries[i]->inCache |= (1U<<i);
- }
- }
-
- // Make block reside in the last cache only
- blk->inCache = 0;
-
+ cacheTracking.moveBlockToTail(blk);
// If block is head, set next block as new head
if (blk == head){
assert(blk->prev == nullptr);
@@ -361,38 +265,180 @@ FALRU::moveToTail(FALRUBlk *blk)
blk->next = nullptr;
tail->next = blk;
tail = blk;
+
+ cacheTracking.check(head, tail);
}
}
-bool
-FALRU::check()
+FALRU *
+FALRUParams::create()
{
+ return new FALRU(this);
+}
+
+void
+FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
+{
+#ifdef FALRU_DEBUG
FALRUBlk* blk = head;
- int tot_size = 0;
- int boundary = 1<<17;
+ unsigned curr_size = 0;
+ unsigned tracked_cache_size = minTrackedSize;
+ CachesMask in_caches_mask = inAllCachesMask;
int j = 0;
- int flags = cacheMask;
+
while (blk) {
- tot_size += blkSize;
- if (blk->inCache != flags) {
- return false;
+ panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
+ "%x found %x", blk->inCachesMask, in_caches_mask);
+
+ curr_size += blkSize;
+ if (curr_size == tracked_cache_size && blk != tail) {
+ panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
+ "cache", j);
+ tracked_cache_size <<= 1;
+ // from this point, blocks fit only in the larger caches
+ in_caches_mask &= ~(1U << j);
+ ++j;
}
- if (tot_size == boundary && blk != tail) {
- if (cacheBoundaries[j] != blk) {
- return false;
- }
- flags &=~(1 << j);
- boundary = boundary<<1;
+ blk = blk->next;
+ }
+#endif // FALRU_DEBUG
+}
+
+void
+FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
+{
+ // early exit if we are not tracking any extra caches
+ FALRUBlk* blk = numTrackedCaches ? head : nullptr;
+ unsigned curr_size = 0;
+ unsigned tracked_cache_size = minTrackedSize;
+ CachesMask in_caches_mask = inAllCachesMask;
+ int j = 0;
+
+ while (blk) {
+ blk->inCachesMask = in_caches_mask;
+
+ curr_size += blkSize;
+ if (curr_size == tracked_cache_size && blk != tail) {
+ boundaries[j] = blk;
+
+ tracked_cache_size <<= 1;
+ // from this point, blocks fit only in the larger caches
+ in_caches_mask &= ~(1U << j);
++j;
}
blk = blk->next;
}
- return true;
}
-FALRU *
-FALRUParams::create()
+
+void
+FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
- return new FALRU(this);
+ // Get the mask of all caches, in which the block didn't fit
+ // before moving it to the head
+ CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;
+
+ for (int i = 0; i < numTrackedCaches; i++) {
+ CachesMask current_cache_mask = 1U << i;
+ if (current_cache_mask & update_caches_mask) {
+ // if the ith cache didn't fit the block (before it is moved to
+ // the head), move the ith boundary 1 block closer to the
+ // MRU
+ boundaries[i]->inCachesMask &= ~current_cache_mask;
+ boundaries[i] = boundaries[i]->prev;
+ } else if (boundaries[i] == blk) {
+ // Make sure the boundary doesn't point to the block
+ // we are about to move
+ boundaries[i] = blk->prev;
+ }
+ }
+
+ // Make block reside in all caches
+ blk->inCachesMask = inAllCachesMask;
+}
+
+void
+FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
+{
+ CachesMask update_caches_mask = blk->inCachesMask;
+
+ for (int i = 0; i < numTrackedCaches; i++) {
+ CachesMask current_cache_mask = 1U << i;
+ if (current_cache_mask & update_caches_mask) {
+ // if the ith cache fitted the block (before it is moved to
+ // the tail), move the ith boundary 1 block closer to the
+ // LRU
+ boundaries[i] = boundaries[i]->next;
+ if (boundaries[i] == blk) {
+ // Make sure the boundary doesn't point to the block
+ // we are about to move
+ boundaries[i] = blk->next;
+ }
+ boundaries[i]->inCachesMask |= current_cache_mask;
+ }
+ }
+
+ // The block now fits only in the actual cache
+ blk->inCachesMask = 0;
+}
+
+void
+FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
+{
+ for (int i = 0; i < numTrackedCaches; i++) {
+ if (blk && ((1U << i) & blk->inCachesMask)) {
+ hits[i]++;
+ } else {
+ misses[i]++;
+ }
+ }
+
+ // Record stats for the actual cache too
+ if (blk) {
+ hits[numTrackedCaches]++;
+ } else {
+ misses[numTrackedCaches]++;
+ }
+
+ accesses++;
}
+void
+printSize(std::ostream &stream, size_t size)
+{
+    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
+ int div = 0;
+    while (size >= 1024 && div < (sizeof SIZES / sizeof *SIZES) - 1) {
+ div++;
+ size >>= 10;
+ }
+ stream << size << SIZES[div];
+}
+
+void
+FALRU::CacheTracking::regStats(std::string name)
+{
+ hits
+ .init(numTrackedCaches + 1)
+ .name(name + ".falru_hits")
+ .desc("The number of hits in each cache size.")
+ ;
+ misses
+ .init(numTrackedCaches + 1)
+ .name(name + ".falru_misses")
+ .desc("The number of misses in each cache size.")
+ ;
+ accesses
+ .name(name + ".falru_accesses")
+ .desc("The number of accesses to the FA LRU cache.")
+ ;
+
+ for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
+ std::stringstream size_str;
+ printSize(size_str, minTrackedSize << i);
+ hits.subname(i, size_str.str());
+ hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
+ misses.subname(i, size_str.str());
+ misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
+ }
+}