diff options
author | Timothy M. Jones <timothy.jones@arm.com> | 2014-01-24 15:29:30 -0600 |
---|---|---|
committer | Timothy M. Jones <timothy.jones@arm.com> | 2014-01-24 15:29:30 -0600 |
commit | 427ceb57a9e1c7e226a549fda4556211bf206066 (patch) | |
tree | c7e3343e83232a4b37ba5d136519c2876f612075 /src | |
parent | 85e8779de78ed913bb6d2a794bee5252d719b0e5 (diff) | |
download | gem5-427ceb57a9e1c7e226a549fda4556211bf206066.tar.xz |
Cache: Collect very basic stats on tag and data accesses
Adds very basic statistics on the number of tag and data accesses within the
cache, which is important for power modelling. For the tags, simply count
the associativity of the cache each time. For the data, this depends on
whether tags and data are accessed sequentially, which is given by a new
parameter. In the parallel case, all data blocks are accessed each time, but
with sequential accesses, a single data block is accessed only on a hit.
Diffstat (limited to 'src')
-rw-r--r-- | src/mem/cache/BaseCache.py | 2 | ||||
-rw-r--r-- | src/mem/cache/tags/Tags.py | 2 | ||||
-rw-r--r-- | src/mem/cache/tags/base.cc | 10 | ||||
-rw-r--r-- | src/mem/cache/tags/base.hh | 5 | ||||
-rw-r--r-- | src/mem/cache/tags/lru.cc | 20 | ||||
-rw-r--r-- | src/mem/cache/tags/lru.hh | 2 |
6 files changed, 40 insertions, 1 deletion
diff --git a/src/mem/cache/BaseCache.py b/src/mem/cache/BaseCache.py index df4602199..9ffe39981 100644 --- a/src/mem/cache/BaseCache.py +++ b/src/mem/cache/BaseCache.py @@ -69,4 +69,6 @@ class BaseCache(MemObject): mem_side = MasterPort("Port on side closer to MEM") addr_ranges = VectorParam.AddrRange([AllMemory], "The address range for the CPU-side port") system = Param.System(Parent.any, "System we belong to") + sequential_access = Param.Bool(False, + "Whether to access tags and data sequentially") tags = Param.BaseTags(LRU(), "Tag Store for LRU caches")
diff --git a/src/mem/cache/tags/Tags.py b/src/mem/cache/tags/Tags.py index c5beff4a7..7c0dded32 100644 --- a/src/mem/cache/tags/Tags.py +++ b/src/mem/cache/tags/Tags.py @@ -58,6 +58,8 @@ class LRU(BaseTags): cxx_class = 'LRU' cxx_header = "mem/cache/tags/lru.hh" assoc = Param.Int(Parent.assoc, "associativity") + sequential_access = Param.Bool(Parent.sequential_access, + "Whether to access tags and data sequentially") class FALRU(BaseTags): type = 'FALRU'
diff --git a/src/mem/cache/tags/base.cc b/src/mem/cache/tags/base.cc index b669a5b06..446c1ea49 100644 --- a/src/mem/cache/tags/base.cc +++ b/src/mem/cache/tags/base.cc @@ -147,6 +147,16 @@ BaseTags::regStats() percentOccsTaskId = occupanciesTaskId / Stats::constant(numBlocks); + tagAccesses + .name(name() + ".tag_accesses") + .desc("Number of tag accesses") + ; + + dataAccesses + .name(name() + ".data_accesses") + .desc("Number of data accesses") + ; + registerDumpCallback(new BaseTagsDumpCallback(this)); registerExitCallback(new BaseTagsCallback(this)); }
diff --git a/src/mem/cache/tags/base.hh b/src/mem/cache/tags/base.hh index e8c71f01f..9e1fb1972 100644 --- a/src/mem/cache/tags/base.hh +++ b/src/mem/cache/tags/base.hh @@ -130,6 +130,11 @@ class BaseTags : public ClockedObject /** Occ % of each context/cpu using the cache */ Stats::Formula percentOccsTaskId; + /** Number of tags consulted over all accesses. */ + Stats::Scalar tagAccesses; + /** Number of data blocks consulted over all accesses. */ + Stats::Scalar dataAccesses; + /** * @} */
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc index 6b05744af..58f3f0977 100644 --- a/src/mem/cache/tags/lru.cc +++ b/src/mem/cache/tags/lru.cc @@ -58,7 +58,8 @@ using namespace std; LRU::LRU(const Params *p) :BaseTags(p), assoc(p->assoc), - numSets(p->size / (p->block_size * p->assoc)) + numSets(p->size / (p->block_size * p->assoc)), + sequentialAccess(p->sequential_access) { // Check parameters if (blkSize < 4 || !isPowerOf2(blkSize)) { @@ -132,6 +133,19 @@ LRU::accessBlock(Addr addr, Cycles &lat, int master_id) unsigned set = extractSet(addr); BlkType *blk = sets[set].findBlk(tag); lat = hitLatency; + + // Access all tags in parallel, hence one in each way. The data side + // either accesses all blocks in parallel, or one block sequentially on + // a hit. Sequential access with a miss doesn't access data. + tagAccesses += assoc; + if (sequentialAccess) { + if (blk != NULL) { + dataAccesses += 1; + } + } else { + dataAccesses += assoc; + } + if (blk != NULL) { // move this block to head of the MRU list sets[set].moveToHead(blk); @@ -216,6 +230,10 @@ LRU::insertBlock(PacketPtr pkt, BlkType *blk) unsigned set = extractSet(addr); sets[set].moveToHead(blk); + + // We only need to write into one tag and one data block. + tagAccesses += 1; + dataAccesses += 1; } void
diff --git a/src/mem/cache/tags/lru.hh b/src/mem/cache/tags/lru.hh index 68c29b754..b9f8fc25c 100644 --- a/src/mem/cache/tags/lru.hh +++ b/src/mem/cache/tags/lru.hh @@ -81,6 +81,8 @@ class LRU : public BaseTags const unsigned assoc; /** The number of sets in the cache. */ const unsigned numSets; + /** Whether tags and data are accessed sequentially. */ + const bool sequentialAccess; /** The cache sets. */ SetType *sets;