From ce2722cdd97a31f85d36f6c32637b230e3c25c73 Mon Sep 17 00:00:00 2001
From: Sophiane Senni
Date: Wed, 30 Nov 2016 17:10:27 -0500
Subject: mem: Split the hit_latency into tag_latency and data_latency

If the cache access mode is parallel, i.e. the "sequential_access"
parameter is set to "False", tags and data are accessed in parallel.
Therefore, the hit latency is the maximum of tag_latency and
data_latency. On the other hand, if the cache access mode is
sequential, i.e. the "sequential_access" parameter is set to "True",
tags and data are accessed sequentially. Therefore, the hit latency
is the sum of tag_latency and data_latency.

Signed-off-by: Jason Lowe-Power
---
 src/mem/cache/Cache.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/mem/cache/Cache.py b/src/mem/cache/Cache.py
index 263b2fea8..dce7e5bf4 100644
--- a/src/mem/cache/Cache.py
+++ b/src/mem/cache/Cache.py
@@ -53,7 +53,8 @@ class BaseCache(MemObject):
     size = Param.MemorySize("Capacity")
     assoc = Param.Unsigned("Associativity")
 
-    hit_latency = Param.Cycles("Hit latency")
+    tag_latency = Param.Cycles("Tag lookup latency")
+    data_latency = Param.Cycles("Data access latency")
     response_latency = Param.Cycles("Latency for the return path on a miss");
 
     max_miss_count = Param.Counter(0,
--
cgit v1.2.3
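
As an illustration of the latency rule described in the commit message,
below is a minimal plain-Python sketch. It is not gem5 code and not part
of this patch; the function name effective_hit_latency and the cycle
values are made up for this example. It only shows how the effective hit
latency would follow from the two new parameters under the parallel and
sequential access modes.

# Illustrative sketch (plain Python, not gem5 code): how the effective
# hit latency follows from tag_latency and data_latency per this commit.

def effective_hit_latency(tag_latency, data_latency,
                          sequential_access=False):
    """Return the hit latency in cycles.

    Parallel access (sequential_access=False): tags and data are probed
    at the same time, so a hit costs the slower of the two lookups.
    Sequential access (sequential_access=True): data is read only after
    the tag match, so the two latencies add up.
    """
    if sequential_access:
        return tag_latency + data_latency
    return max(tag_latency, data_latency)

# Example: a cache with 2-cycle tag lookup and 3-cycle data access.
print(effective_hit_latency(2, 3))                          # parallel   -> 3
print(effective_hit_latency(2, 3, sequential_access=True))  # sequential -> 5

In a gem5 configuration script, the corresponding change is simply to
set tag_latency and data_latency on a cache SimObject where hit_latency
was set before; the max/sum arbitration itself is handled inside the
cache model, not in the configuration.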