path: root/configs/example/memtest.py
author     Sophiane Senni <sophiane.senni@gmail.com>    2016-11-30 17:10:27 -0500
committer  Sophiane Senni <sophiane.senni@gmail.com>    2016-11-30 17:10:27 -0500
commit     ce2722cdd97a31f85d36f6c32637b230e3c25c73 (patch)
tree       72993532267d3f1f99e8519be837dd7c523a722f /configs/example/memtest.py
parent     047caf24ba9a640247b63584c2291e760f1f4d54 (diff)
download   gem5-ce2722cdd97a31f85d36f6c32637b230e3c25c73.tar.xz
mem: Split the hit_latency into tag_latency and data_latency
If the cache access mode is parallel, i.e. the "sequential_access" parameter is set to "False", tags and data are accessed in parallel, so the hit_latency is the maximum of tag_latency and data_latency. On the other hand, if the cache access mode is sequential, i.e. the "sequential_access" parameter is set to "True", tags and data are accessed one after the other, so the hit_latency is the sum of tag_latency and data_latency.

Signed-off-by: Jason Lowe-Power <jason@lowepower.com>
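For intuition, a minimal sketch of the relation described above, in Python. The actual latency arithmetic happens inside gem5's cache model, not in this config script, and the helper name effective_hit_latency is purely illustrative:

    def effective_hit_latency(tag_latency, data_latency, sequential_access):
        # Sequential access: tags are checked first, then data is read,
        # so the two latencies add up.
        if sequential_access:
            return tag_latency + data_latency
        # Parallel access: tags and data are looked up at the same time,
        # so the slower of the two dominates.
        return max(tag_latency, data_latency)

    # With the 1-cycle prototype latencies used in this patch:
    assert effective_hit_latency(1, 1, sequential_access=False) == 1
    # With asymmetric latencies, sequential access pays for both:
    assert effective_hit_latency(2, 3, sequential_access=True) == 5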
Diffstat (limited to 'configs/example/memtest.py')
-rw-r--r--  configs/example/memtest.py  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/configs/example/memtest.py b/configs/example/memtest.py
index 97bf79dff..d6f940e6b 100644
--- a/configs/example/memtest.py
+++ b/configs/example/memtest.py
@@ -176,7 +176,7 @@ else:
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = Cache(size = '32kB', assoc = 4,
-                 hit_latency = 1, response_latency = 1,
+                 tag_latency = 1, data_latency = 1, response_latency = 1,
                  tgts_per_mshr = 8, clusivity = 'mostly_incl',
                  writeback_clean = True)
@@ -194,7 +194,8 @@ for scale in cachespec[:-1]:
      prev = cache_proto[0]
      next = prev()
      next.size = prev.size * scale
-     next.hit_latency = prev.hit_latency * 10
+     next.tag_latency = prev.tag_latency * 10
+     next.data_latency = prev.data_latency * 10
      next.response_latency = prev.response_latency * 10
      next.assoc = prev.assoc * scale
      next.mshrs = prev.mshrs * scale
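Tying the two hunks together: with the 1-cycle prototype latencies, the first cache level derived in the loop above ends up with tag_latency = 10 and data_latency = 10. Using the illustrative helper sketched under the commit message (hypothetical, not part of the patch), its effective hit latency would work out as follows:

    # Illustrative numbers only, taken from the *10 scaling in the loop above.
    l2_tag, l2_data = 1 * 10, 1 * 10
    effective_hit_latency(l2_tag, l2_data, sequential_access=False)  # 10 cycles (parallel)
    effective_hit_latency(l2_tag, l2_data, sequential_access=True)   # 20 cycles (sequential)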