author | Andreas Hansson <andreas.hansson@arm.com> | 2015-07-03 10:14:43 -0400
committer | Andreas Hansson <andreas.hansson@arm.com> | 2015-07-03 10:14:43 -0400
commit | b93c912013cd7f5417b92eaa33010af70e97f8ec (patch)
tree | e381afa1a581e3d676e9c91d9999c490fa6b1a23 /configs/example
parent | 71856cfbbcac94997839ac7831b3ac4b2ddf29a2 (diff)
download | gem5-b93c912013cd7f5417b92eaa33010af70e97f8ec.tar.xz
mem: Remove redundant is_top_level cache parameter
This patch takes the final step in removing the is_top_level parameter
from the cache. With the recent changes to read requests and write
invalidations, the parameter is no longer needed and is consequently
removed.
This also means that asymmetric cache hierarchies are now fully
supported (in fact we already use one: L1 caches, but no table-walker
caches, connected to a shared L2).
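
To make the message concrete, here is a minimal sketch (not part of this patch) of the kind of asymmetric hierarchy it describes, written against gem5's Python config API of this era. The cache parameters mirror the style of memcheck.py/memtest.py, but the specific sizes, the L2XBar wiring, and the port names are illustrative assumptions, not code from the repository:

```python
# Illustrative sketch only -- not code from this patch. Two differently
# configured L1 caches share one L2, and no cache carries an
# is_top_level flag. L2XBar and the port names (cpu_side, mem_side,
# slave, master) are assumed from gem5's config API of this period.
from m5.objects import BaseCache, L2XBar

# Asymmetric L1s: same parameter style as memcheck.py/memtest.py
l1_a = BaseCache(size='32kB', assoc=4,
                 hit_latency=1, response_latency=1,
                 mshrs=4, tgts_per_mshr=8)
l1_b = BaseCache(size='64kB', assoc=8,
                 hit_latency=1, response_latency=1,
                 mshrs=8, tgts_per_mshr=8)

# Shared L2 behind a crossbar
l2bus = L2XBar()
l2 = BaseCache(size='512kB', assoc=16,
               hit_latency=10, response_latency=10,
               mshrs=16, tgts_per_mshr=12)

for l1 in (l1_a, l1_b):
    l1.mem_side = l2bus.slave   # each L1 plugs into the shared bus
l2.cpu_side = l2bus.master      # the bus feeds the single shared L2
```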
Diffstat (limited to 'configs/example')
-rw-r--r-- | configs/example/memcheck.py | 3
-rw-r--r-- | configs/example/memtest.py | 3
2 files changed, 2 insertions, 4 deletions
diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py
index f0bc26e32..ca2659ed0 100644
--- a/configs/example/memcheck.py
+++ b/configs/example/memcheck.py
@@ -154,7 +154,7 @@ for t, m in zip(testerspec, multiplier):
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = BaseCache(size = '32kB', assoc = 4,
                      hit_latency = 1, response_latency = 1,
-                     tgts_per_mshr = 8, is_top_level = True)
+                     tgts_per_mshr = 8)
 
 if options.blocking:
     proto_l1.mshrs = 1
@@ -179,7 +179,6 @@ for scale in cachespec[:-1]:
     next.response_latency = prev.response_latency * 10
     next.assoc = prev.assoc * scale
     next.mshrs = prev.mshrs * scale
-    next.is_top_level = False
     cache_proto.insert(0, next)
 
 # Create a config to be used by all the traffic generators
diff --git a/configs/example/memtest.py b/configs/example/memtest.py
index 0df9f766f..a51bd2796 100644
--- a/configs/example/memtest.py
+++ b/configs/example/memtest.py
@@ -177,7 +177,7 @@ else:
 # Define a prototype L1 cache that we scale for all successive levels
 proto_l1 = BaseCache(size = '32kB', assoc = 4,
                      hit_latency = 1, response_latency = 1,
-                     tgts_per_mshr = 8, is_top_level = True)
+                     tgts_per_mshr = 8)
 
 if options.blocking:
     proto_l1.mshrs = 1
@@ -197,7 +197,6 @@ for scale in cachespec[:-1]:
     next.response_latency = prev.response_latency * 10
     next.assoc = prev.assoc * scale
     next.mshrs = prev.mshrs * scale
-    next.is_top_level = False
     cache_proto.insert(0, next)
 
 # Make a prototype for the tester to be used throughout
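
For readers skimming the hunks, the surrounding pattern in both scripts looks roughly like the sketch below: each lower cache level is a cloned, scaled copy of the level above it, and after this patch no level needs an is_top_level marker. The lines outside the visible hunk context (the cache_proto initialisation and the size scaling) are reconstructed assumptions, not part of the patch:

```python
# Sketch of the prototype-scaling loop the hunks modify, paraphrased
# from memcheck.py/memtest.py; cache_proto setup and next.size are
# assumed from surrounding code not shown in the diff.
cache_proto = [proto_l1]
for scale in cachespec[:-1]:
    prev = cache_proto[0]
    next = prev()                   # clone the previous level's prototype
    next.size = prev.size * scale   # assumed: each level scales capacity
    next.response_latency = prev.response_latency * 10
    next.assoc = prev.assoc * scale
    next.mshrs = prev.mshrs * scale
    # previously: next.is_top_level = False  (removed by this patch)
    cache_proto.insert(0, next)     # lowest level ends up first
```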