path: root/src/mem/cache/tags/base_set_assoc.hh
author    Marco Balboni <Marco.Balboni@ARM.com>  2015-02-11 10:23:36 -0500
committer Marco Balboni <Marco.Balboni@ARM.com>  2015-02-11 10:23:36 -0500
commit    e2828587b3f28c4f37f0fe598209290bc3d41de0 (patch)
tree      d0d967c233c0da3d07f045806d6c48e9b6b06190 /src/mem/cache/tags/base_set_assoc.hh
parent    5a573762d0b27eb26a572581611df2196656641f (diff)
download  gem5-e2828587b3f28c4f37f0fe598209290bc3d41de0.tar.xz
mem: Clarify usage of latency in the cache
This patch adds some much-needed clarity to the specification of the cache timing. For now, hit_latency and response_latency are kept as top-level parameters, but the cache itself has a number of local variables to better map the individual timing variables to different behaviours (and sub-components). The introduced variables are:
- lookupLatency: latency of the tag lookup, occurring on any access
- forwardLatency: latency incurred on an outbound miss
- fillLatency: latency to fill a cache block
We keep the existing responseLatency.
The forwardLatency is used by allocateInternalBuffer() for:
- MSHR allocateWriteBuffer (uncached write forwarded to the WriteBuffer);
- MSHR allocateMissBuffer (cacheable miss in the MSHR queue);
- MSHR allocateUncachedReadBuffer (uncached read allocated in the MSHR queue)
It is our assumption that the time for the above three buffers is the same. Similarly, for snoop responses passing through the cache we use forwardLatency.
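To make the split concrete, the sketch below shows one way the per-behaviour latencies could sit alongside the two top-level parameters. It is a minimal, self-contained C++ sketch, not gem5's code: the Cycles alias, the constructor, and the way the values are derived from hit_latency/response_latency are illustrative assumptions; only the member names follow the commit message.

    #include <cstdint>
    #include <iostream>

    // Stand-in for gem5's Cycles type (an integral cycle count).
    using Cycles = uint64_t;

    // Minimal sketch of how the per-behaviour latencies described in the
    // commit message could be held next to the two top-level parameters.
    struct CacheLatencies {
        // Top-level parameters kept for now.
        Cycles hitLatency;
        Cycles responseLatency;

        // Introduced variables mapping latency to behaviours:
        Cycles lookupLatency;   // tag lookup, paid on every access
        Cycles forwardLatency;  // outbound miss / snoop response forwarding
        Cycles fillLatency;     // filling a block after a miss returns

        // Assumed derivation for illustration: lookup and forward default to
        // the hit latency, fill to the response latency. The real mapping is
        // cache-specific and not spelled out here.
        explicit CacheLatencies(Cycles hit, Cycles response)
            : hitLatency(hit), responseLatency(response),
              lookupLatency(hit), forwardLatency(hit), fillLatency(response) {}
    };

    int main() {
        CacheLatencies lat(2, 4);
        // forwardLatency would be charged when allocating a write buffer,
        // a miss buffer (MSHR) or an uncached read buffer, per the commit.
        std::cout << "lookup=" << lat.lookupLatency
                  << " forward=" << lat.forwardLatency
                  << " fill=" << lat.fillLatency << "\n";
        return 0;
    }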
Diffstat (limited to 'src/mem/cache/tags/base_set_assoc.hh')
-rw-r--r--  src/mem/cache/tags/base_set_assoc.hh  12
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/src/mem/cache/tags/base_set_assoc.hh b/src/mem/cache/tags/base_set_assoc.hh
index ac575d2ff..0107aafaf 100644
--- a/src/mem/cache/tags/base_set_assoc.hh
+++ b/src/mem/cache/tags/base_set_assoc.hh
@@ -178,7 +178,7 @@ public:
Addr tag = extractTag(addr);
int set = extractSet(addr);
BlkType *blk = sets[set].findBlk(tag, is_secure);
- lat = hitLatency;
+ lat = accessLatency;
// Access all tags in parallel, hence one in each way. The data side
// either accesses all blocks in parallel, or one block sequentially on
@@ -195,7 +195,7 @@ public:
if (blk != NULL) {
if (blk->whenReady > curTick()
&& cache->ticksToCycles(blk->whenReady - curTick())
- > hitLatency) {
+ > accessLatency) {
lat = cache->ticksToCycles(blk->whenReady - curTick());
}
blk->refCount += 1;
@@ -343,14 +343,6 @@ public:
}
/**
- * Return the hit latency.
- * @return the hit latency.
- */
- Cycles getHitLatency() const
- {
- return hitLatency;
- }
- /**
* Iterate through all blocks and clear all locks.
* Needed to clear all lock tracking at once.
*/
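To illustrate the condition touched in the second hunk (extending the reported latency when the block's data is not yet ready), here is a small standalone sketch of the same logic. The Tick/Cycles aliases, the fixed clockPeriod, and the ticksToCycles helper are simplified stand-ins, not gem5's actual implementation.

    #include <cassert>
    #include <cstdint>

    using Tick   = uint64_t;
    using Cycles = uint64_t;

    // Simplified stand-in: assume a fixed clock period in ticks.
    static constexpr Tick clockPeriod = 500;

    static Cycles ticksToCycles(Tick t) { return t / clockPeriod; }

    // Returns the latency reported for a hit, extended if the block's data
    // will not be ready (whenReady) until after the tag access completes.
    Cycles hitAccessLatency(Tick curTick, Tick whenReady, Cycles accessLatency)
    {
        Cycles lat = accessLatency;
        if (whenReady > curTick &&
            ticksToCycles(whenReady - curTick) > accessLatency) {
            // The block is still being filled or written back;
            // charge the longer wait instead of the plain access latency.
            lat = ticksToCycles(whenReady - curTick);
        }
        return lat;
    }

    int main() {
        // Block ready 3000 ticks from now: 3000/500 = 6 cycles > 2-cycle access.
        assert(hitAccessLatency(1000, 4000, 2) == 6);
        // Block already ready: the plain access latency applies.
        assert(hitAccessLatency(1000, 500, 2) == 2);
        return 0;
    }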