Diffstat (limited to 'src')
-rw-r--r--  src/mem/cache/Cache.py               |  3
-rw-r--r--  src/mem/cache/base.cc                |  7
-rw-r--r--  src/mem/cache/base.hh                |  6
-rw-r--r--  src/mem/cache/tags/Tags.py           | 15
-rw-r--r--  src/mem/cache/tags/base.cc           |  6
-rw-r--r--  src/mem/cache/tags/base.hh           |  8
-rw-r--r--  src/mem/cache/tags/base_set_assoc.hh | 17
-rw-r--r--  src/mem/cache/tags/fa_lru.cc         | 13
-rw-r--r--  src/mem/cache/tags/fa_lru.hh         |  1
9 files changed, 59 insertions, 17 deletions
diff --git a/src/mem/cache/Cache.py b/src/mem/cache/Cache.py
index 263b2fea8..dce7e5bf4 100644
--- a/src/mem/cache/Cache.py
+++ b/src/mem/cache/Cache.py
@@ -53,7 +53,8 @@ class BaseCache(MemObject):
     size = Param.MemorySize("Capacity")
     assoc = Param.Unsigned("Associativity")
 
-    hit_latency = Param.Cycles("Hit latency")
+    tag_latency = Param.Cycles("Tag lookup latency")
+    data_latency = Param.Cycles("Data access latency")
     response_latency = Param.Cycles("Latency for the return path on a miss");
 
     max_miss_count = Param.Counter(0,
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 814159fc9..7f08d173e 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -72,9 +72,10 @@ BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
       mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
       writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
       blkSize(blk_size),
-      lookupLatency(p->hit_latency),
-      forwardLatency(p->hit_latency),
-      fillLatency(p->response_latency),
+      lookupLatency(p->tag_latency),
+      dataLatency(p->data_latency),
+      forwardLatency(p->tag_latency),
+      fillLatency(p->data_latency),
       responseLatency(p->response_latency),
       numTarget(p->tgts_per_mshr),
       forwardSnoops(true),
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 716969070..4f7dbb18c 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -265,6 +265,12 @@ class BaseCache : public MemObject
     const Cycles lookupLatency;
 
     /**
+     * The latency of data access of a cache. It occurs when there is
+     * an access to the cache.
+     */
+    const Cycles dataLatency;
+
+    /**
      * This is the forward latency of the cache. It occurs when there
      * is a cache miss and a request is forwarded downstream, in
      * particular an outbound miss.
diff --git a/src/mem/cache/tags/Tags.py b/src/mem/cache/tags/Tags.py
index ab1282ac9..6c0b85044 100644
--- a/src/mem/cache/tags/Tags.py
+++ b/src/mem/cache/tags/Tags.py
@@ -49,17 +49,22 @@ class BaseTags(ClockedObject):
     # Get the block size from the parent (system)
     block_size = Param.Int(Parent.cache_line_size, "block size in bytes")
 
-    # Get the hit latency from the parent (cache)
-    hit_latency = Param.Cycles(Parent.hit_latency,
-                               "The hit latency for this cache")
+    # Get the tag lookup latency from the parent (cache)
+    tag_latency = Param.Cycles(Parent.tag_latency,
+                               "The tag lookup latency for this cache")
+
+    # Get the RAM access latency from the parent (cache)
+    data_latency = Param.Cycles(Parent.data_latency,
+                                "The data access latency for this cache")
+
+    sequential_access = Param.Bool(Parent.sequential_access,
+                                   "Whether to access tags and data sequentially")
 
 class BaseSetAssoc(BaseTags):
     type = 'BaseSetAssoc'
     abstract = True
     cxx_header = "mem/cache/tags/base_set_assoc.hh"
     assoc = Param.Int(Parent.assoc, "associativity")
-    sequential_access = Param.Bool(Parent.sequential_access,
-                                   "Whether to access tags and data sequentially")
 
 class LRU(BaseSetAssoc):
     type = 'LRU'
diff --git a/src/mem/cache/tags/base.cc b/src/mem/cache/tags/base.cc
index 6a926c0d8..cf970c7dd 100644
--- a/src/mem/cache/tags/base.cc
+++ b/src/mem/cache/tags/base.cc
@@ -56,7 +56,11 @@ using namespace std;
 
 BaseTags::BaseTags(const Params *p)
     : ClockedObject(p), blkSize(p->block_size), size(p->size),
-      accessLatency(p->hit_latency), cache(nullptr), warmupBound(0),
+      lookupLatency(p->tag_latency),
+      accessLatency(p->sequential_access ?
+                    p->tag_latency + p->data_latency :
+                    std::max(p->tag_latency, p->data_latency)),
+      cache(nullptr), warmupBound(0),
       warmedUp(false), numBlocks(0)
 {
 }
diff --git a/src/mem/cache/tags/base.hh b/src/mem/cache/tags/base.hh
index f1ef947a5..8c68cc093 100644
--- a/src/mem/cache/tags/base.hh
+++ b/src/mem/cache/tags/base.hh
@@ -69,7 +69,13 @@ class BaseTags : public ClockedObject
     const unsigned blkSize;
     /** The size of the cache. */
     const unsigned size;
-    /** The access latency of the cache. */
+    /** The tag lookup latency of the cache. */
+    const Cycles lookupLatency;
+    /**
+     * The total access latency of the cache. This latency
+     * is different depending on the cache access mode
+     * (parallel or sequential)
+     */
     const Cycles accessLatency;
     /** Pointer to the parent cache. */
     BaseCache *cache;
diff --git a/src/mem/cache/tags/base_set_assoc.hh b/src/mem/cache/tags/base_set_assoc.hh
index 31284991f..a6dcf0572 100644
--- a/src/mem/cache/tags/base_set_assoc.hh
+++ b/src/mem/cache/tags/base_set_assoc.hh
@@ -208,7 +208,6 @@ public:
         Addr tag = extractTag(addr);
         int set = extractSet(addr);
         BlkType *blk = sets[set].findBlk(tag, is_secure);
-        lat = accessLatency;;
 
         // Access all tags in parallel, hence one in each way.  The data side
         // either accesses all blocks in parallel, or one block sequentially on
@@ -223,12 +222,20 @@ public:
         }
 
         if (blk != nullptr) {
-            if (blk->whenReady > curTick()
-                && cache->ticksToCycles(blk->whenReady - curTick())
-                > accessLatency) {
-                lat = cache->ticksToCycles(blk->whenReady - curTick());
+            // If a cache hit
+            lat = accessLatency;
+            // Check if the block to be accessed is available. If not,
+            // apply the accessLatency on top of block->whenReady.
+            if (blk->whenReady > curTick() &&
+                cache->ticksToCycles(blk->whenReady - curTick()) >
+                accessLatency) {
+                lat = cache->ticksToCycles(blk->whenReady - curTick()) +
+                    accessLatency;
             }
             blk->refCount += 1;
+        } else {
+            // If a cache miss
+            lat = lookupLatency;
         }
 
         return blk;
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index 0d6a3392d..cdd0db216 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -186,6 +186,16 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
     FALRUBlk* blk = hashLookup(blkAddr);
 
     if (blk && blk->isValid()) {
+        // If a cache hit
+        lat = accessLatency;
+        // Check if the block to be accessed is available. If not,
+        // apply the accessLatency on top of block->whenReady.
+        if (blk->whenReady > curTick() &&
+            cache->ticksToCycles(blk->whenReady - curTick()) >
+            accessLatency) {
+            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
+                accessLatency;
+        }
         assert(blk->tag == blkAddr);
         tmp_in_cache = blk->inCache;
         for (unsigned i = 0; i < numCaches; i++) {
@@ -200,6 +210,8 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
             moveToHead(blk);
         }
     } else {
+        // If a cache miss
+        lat = lookupLatency;
         blk = nullptr;
         for (unsigned i = 0; i <= numCaches; ++i) {
             misses[i]++;
@@ -209,7 +221,6 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
         *inCache = tmp_in_cache;
     }
 
-    lat = accessLatency;
     //assert(check());
     return blk;
 }
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
index 1bbd9fbfb..710dfafbf 100644
--- a/src/mem/cache/tags/fa_lru.hh
+++ b/src/mem/cache/tags/fa_lru.hh
@@ -51,6 +51,7 @@
 #include <list>
 #include <unordered_map>
 
+#include "mem/cache/base.hh"
 #include "mem/cache/blk.hh"
 #include "mem/cache/tags/base.hh"
 #include "mem/packet.hh"
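The change above splits the old hit_latency into tag_latency and data_latency, moves sequential_access up from BaseSetAssoc to BaseTags, and lets the tags derive the overall hit latency themselves: tag_latency + data_latency for a sequential lookup, max(tag_latency, data_latency) for a parallel one, and tag_latency alone on a miss. The fragment below is a minimal, illustrative config sketch only, not part of this change: the L1DCache class name and the parameter values are invented for the example, and it assumes the classic Cache SimObject exported as m5.objects.Cache together with its pre-existing sequential_access parameter.

from m5.objects import Cache

class L1DCache(Cache):
    size = '32kB'
    assoc = 2
    # Previously a single hit_latency; the tag and data arrays are now
    # modelled with separate latencies.
    tag_latency = 2            # cycles to search the tag array
    data_latency = 2           # cycles to read the data array
    response_latency = 2
    mshrs = 4
    tgts_per_mshr = 20
    # False: parallel lookup, a hit costs max(tag_latency, data_latency).
    # True: sequential lookup, a hit costs tag_latency + data_latency.
    sequential_access = False

With these values a parallel-lookup hit is charged max(2, 2) = 2 cycles by the tags, while setting sequential_access = True would raise that to 4, matching the accessLatency expression added in src/mem/cache/tags/base.cc.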