diff options
author | Sophiane Senni <sophiane.senni@gmail.com> | 2016-11-30 17:10:27 -0500 |
---|---|---|
committer | Sophiane Senni <sophiane.senni@gmail.com> | 2016-11-30 17:10:27 -0500 |
commit | ce2722cdd97a31f85d36f6c32637b230e3c25c73 (patch) | |
tree | 72993532267d3f1f99e8519be837dd7c523a722f /configs/example | |
parent | 047caf24ba9a640247b63584c2291e760f1f4d54 (diff) | |
download | gem5-ce2722cdd97a31f85d36f6c32637b230e3c25c73.tar.xz |
mem: Split the hit_latency into tag_latency and data_latency
If the cache access mode is parallel, i.e. the "sequential_access" parameter
is set to "False", tags and data are accessed in parallel. Therefore,
the hit_latency is the maximum of tag_latency and
data_latency. On the other hand, if the cache access mode is
sequential, i.e. the "sequential_access" parameter is set to "True",
tags and data are accessed sequentially. Therefore, the hit_latency
is the sum of tag_latency and data_latency.
Signed-off-by: Jason Lowe-Power <jason@lowepower.com>
Diffstat (limited to 'configs/example')
-rw-r--r-- | configs/example/arm/devices.py | 15 | ||||
-rw-r--r-- | configs/example/memcheck.py | 5 | ||||
-rw-r--r-- | configs/example/memtest.py | 5 |
3 files changed, 16 insertions, 9 deletions
diff --git a/configs/example/arm/devices.py b/configs/example/arm/devices.py index 815e94f0c..6734aaf5c 100644 --- a/configs/example/arm/devices.py +++ b/configs/example/arm/devices.py @@ -45,7 +45,8 @@ from common.Caches import * from common import CpuConfig class L1I(L1_ICache): - hit_latency = 1 + tag_latency = 1 + data_latency = 1 response_latency = 1 mshrs = 4 tgts_per_mshr = 8 @@ -54,7 +55,8 @@ class L1I(L1_ICache): class L1D(L1_DCache): - hit_latency = 2 + tag_latency = 2 + data_latency = 2 response_latency = 1 mshrs = 16 tgts_per_mshr = 16 @@ -64,7 +66,8 @@ class L1D(L1_DCache): class WalkCache(PageTableWalkerCache): - hit_latency = 4 + tag_latency = 4 + data_latency = 4 response_latency = 4 mshrs = 6 tgts_per_mshr = 8 @@ -74,7 +77,8 @@ class WalkCache(PageTableWalkerCache): class L2(L2Cache): - hit_latency = 12 + tag_latency = 12 + data_latency = 12 response_latency = 5 mshrs = 32 tgts_per_mshr = 8 @@ -87,7 +91,8 @@ class L2(L2Cache): class L3(Cache): size = '16MB' assoc = 16 - hit_latency = 20 + tag_latency = 20 + data_latency = 20 response_latency = 20 mshrs = 20 tgts_per_mshr = 12 diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py index 17cab8b04..0feaaf0b4 100644 --- a/configs/example/memcheck.py +++ b/configs/example/memcheck.py @@ -153,7 +153,7 @@ for t, m in zip(testerspec, multiplier): # Define a prototype L1 cache that we scale for all successive levels proto_l1 = Cache(size = '32kB', assoc = 4, - hit_latency = 1, response_latency = 1, + tag_latency = 1, data_latency = 1, response_latency = 1, tgts_per_mshr = 8) if options.blocking: @@ -175,7 +175,8 @@ for scale in cachespec[:-1]: prev = cache_proto[0] next = prev() next.size = prev.size * scale - next.hit_latency = prev.hit_latency * 10 + next.tag_latency = prev.tag_latency * 10 + next.data_latency = prev.data_latency * 10 next.response_latency = prev.response_latency * 10 next.assoc = prev.assoc * scale next.mshrs = prev.mshrs * scale diff --git a/configs/example/memtest.py 
b/configs/example/memtest.py index 97bf79dff..d6f940e6b 100644 --- a/configs/example/memtest.py +++ b/configs/example/memtest.py @@ -176,7 +176,7 @@ else: # Define a prototype L1 cache that we scale for all successive levels proto_l1 = Cache(size = '32kB', assoc = 4, - hit_latency = 1, response_latency = 1, + tag_latency = 1, data_latency = 1, response_latency = 1, tgts_per_mshr = 8, clusivity = 'mostly_incl', writeback_clean = True) @@ -194,7 +194,8 @@ for scale in cachespec[:-1]: prev = cache_proto[0] next = prev() next.size = prev.size * scale - next.hit_latency = prev.hit_latency * 10 + next.tag_latency = prev.tag_latency * 10 + next.data_latency = prev.data_latency * 10 next.response_latency = prev.response_latency * 10 next.assoc = prev.assoc * scale next.mshrs = prev.mshrs * scale |