 configs/common/Caches.py     |  3 ---
 configs/common/O3_ARM_v7a.py |  3 ---
 configs/example/memcheck.py  |  3 +--
 configs/example/memtest.py   |  3 +--
 src/mem/cache/BaseCache.py   |  1 -
 src/mem/cache/base.cc        |  1 -
 src/mem/cache/base.hh        |  5 -----
 src/mem/cache/cache_impl.hh  | 25 +++++++++++++------------
 8 files changed, 15 insertions(+), 29 deletions(-)
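
This commit removes the cache's is_top_level parameter. The flag told a
cache whether it sat closest to the CPU (an L1, I/O cache, or
table-walker cache) and gated ownership transfer, whole-line write
promotion, software-prefetch handling, and the isCachedAbove check. The
diff below derives the same information from existing state instead:
the incoming command type distinguishes requests that come from coherent
caches, and forwardSnoops indicates whether any cache sits above.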
diff --git a/configs/common/Caches.py b/configs/common/Caches.py
index 2bdffc6c7..cfb873b5e 100644
--- a/configs/common/Caches.py
+++ b/configs/common/Caches.py
@@ -52,7 +52,6 @@ class L1Cache(BaseCache):
response_latency = 2
mshrs = 4
tgts_per_mshr = 20
- is_top_level = True
class L1_ICache(L1Cache):
is_read_only = True
@@ -76,7 +75,6 @@ class IOCache(BaseCache):
size = '1kB'
tgts_per_mshr = 12
forward_snoops = False
- is_top_level = True
class PageTableWalkerCache(BaseCache):
assoc = 2
@@ -86,7 +84,6 @@ class PageTableWalkerCache(BaseCache):
size = '1kB'
tgts_per_mshr = 12
forward_snoops = False
- is_top_level = True
# the x86 table walker actually writes to the table-walker cache
if buildEnv['TARGET_ISA'] == 'x86':
is_read_only = False
diff --git a/configs/common/O3_ARM_v7a.py b/configs/common/O3_ARM_v7a.py
index b4b66df9c..dbfdf6c41 100644
--- a/configs/common/O3_ARM_v7a.py
+++ b/configs/common/O3_ARM_v7a.py
@@ -149,7 +149,6 @@ class O3_ARM_v7a_ICache(BaseCache):
tgts_per_mshr = 8
size = '32kB'
assoc = 2
- is_top_level = True
forward_snoops = False
is_read_only = True
@@ -162,7 +161,6 @@ class O3_ARM_v7a_DCache(BaseCache):
size = '32kB'
assoc = 2
write_buffers = 16
- is_top_level = True
# TLB Cache
# Use a cache as a L2 TLB
@@ -174,7 +172,6 @@ class O3_ARM_v7aWalkCache(BaseCache):
size = '1kB'
assoc = 8
write_buffers = 16
- is_top_level = True
forward_snoops = False
is_read_only = True
diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py
index f0bc26e32..ca2659ed0 100644
--- a/configs/example/memcheck.py
+++ b/configs/example/memcheck.py
@@ -154,7 +154,7 @@ for t, m in zip(testerspec, multiplier):
# Define a prototype L1 cache that we scale for all successive levels
proto_l1 = BaseCache(size = '32kB', assoc = 4,
hit_latency = 1, response_latency = 1,
- tgts_per_mshr = 8, is_top_level = True)
+ tgts_per_mshr = 8)
if options.blocking:
proto_l1.mshrs = 1
@@ -179,7 +179,6 @@ for scale in cachespec[:-1]:
next.response_latency = prev.response_latency * 10
next.assoc = prev.assoc * scale
next.mshrs = prev.mshrs * scale
- next.is_top_level = False
cache_proto.insert(0, next)
# Create a config to be used by all the traffic generators
diff --git a/configs/example/memtest.py b/configs/example/memtest.py
index 0df9f766f..a51bd2796 100644
--- a/configs/example/memtest.py
+++ b/configs/example/memtest.py
@@ -177,7 +177,7 @@ else:
# Define a prototype L1 cache that we scale for all successive levels
proto_l1 = BaseCache(size = '32kB', assoc = 4,
hit_latency = 1, response_latency = 1,
- tgts_per_mshr = 8, is_top_level = True)
+ tgts_per_mshr = 8)
if options.blocking:
proto_l1.mshrs = 1
@@ -197,7 +197,6 @@ for scale in cachespec[:-1]:
next.response_latency = prev.response_latency * 10
next.assoc = prev.assoc * scale
next.mshrs = prev.mshrs * scale
- next.is_top_level = False
cache_proto.insert(0, next)
# Make a prototype for the tester to be used throughout
diff --git a/src/mem/cache/BaseCache.py b/src/mem/cache/BaseCache.py
index 4d6766456..d908430e5 100644
--- a/src/mem/cache/BaseCache.py
+++ b/src/mem/cache/BaseCache.py
@@ -64,7 +64,6 @@ class BaseCache(MemObject):
forward_snoops = Param.Bool(True,
"Forward snoops from mem side to cpu side")
- is_top_level = Param.Bool(False, "Is this cache at the top level (e.g. L1)")
is_read_only = Param.Bool(False, "Is this cache read only (e.g. inst)")
prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache")
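
Note that with the Param removed from BaseCache.py, any out-of-tree
config that still assigns is_top_level will fail at configuration time,
which is why the bundled configs above drop the assignment in the same
patch.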
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index b1c512079..1d0b9a3dd 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -78,7 +78,6 @@ BaseCache::BaseCache(const Params *p)
responseLatency(p->response_latency),
numTarget(p->tgts_per_mshr),
forwardSnoops(p->forward_snoops),
- isTopLevel(p->is_top_level),
isReadOnly(p->is_read_only),
blocked(0),
order(0),
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index d2cb11f33..6c87fad12 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -304,11 +304,6 @@ class BaseCache : public MemObject
/** Do we forward snoops from mem side port through to cpu side port? */
const bool forwardSnoops;
- /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
- * never try to forward ownership and similar optimizations to the cpu
- * side */
- const bool isTopLevel;
-
/**
* Is this cache read only, for example the instruction cache, or
* table-walker cache. A cache that is read only should never see
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 39206ca84..9dec54f45 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -179,7 +179,15 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
blk->trackLoadLocked(pkt);
}
pkt->setDataFromBlock(blk->data, blkSize);
- if (pkt->getSize() == blkSize) {
+ // determine if this read is from a (coherent) cache, or not
+ // by looking at the command type; we could potentially add a
+ // packet attribute such as 'FromCache' to make this check a
+ // bit cleaner
+ if (pkt->cmd == MemCmd::ReadExReq ||
+ pkt->cmd == MemCmd::ReadSharedReq ||
+ pkt->cmd == MemCmd::ReadCleanReq ||
+ pkt->cmd == MemCmd::SCUpgradeFailReq) {
+ assert(pkt->getSize() == blkSize);
// special handling for coherent block requests from
// upper-level caches
if (pkt->needsExclusive()) {
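
The size-based test this hunk replaces (pkt->getSize() == blkSize) was
only a proxy for "this read came from a coherent cache"; the new code
spells out the qualifying commands. As the in-diff comment notes, a
packet attribute could centralise the check. A minimal sketch of such a
helper, assuming the same four commands (the name isFromCache is
illustrative and not part of this patch):

    // Sketch only: wrap the command check from the hunk above in a
    // single predicate, as the 'FromCache' comment suggests.
    // (Presumes gem5's mem/packet.hh types: PacketPtr, MemCmd.)
    static bool
    isFromCache(const PacketPtr pkt)
    {
        return pkt->cmd == MemCmd::ReadExReq ||
               pkt->cmd == MemCmd::ReadSharedReq ||
               pkt->cmd == MemCmd::ReadCleanReq ||
               pkt->cmd == MemCmd::SCUpgradeFailReq;
    }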
@@ -211,7 +219,7 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
if (blk->isDirty()) {
// special considerations if we're owner:
- if (!deferred_response && !isTopLevel) {
+ if (!deferred_response) {
// if we are responding immediately and can
// signal that we're transferring ownership
// along with exclusivity, do so
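
Dropping !isTopLevel here is safe because this branch is now only
reachable for reads that the command check above identified as coming
from a coherent cache; a top-level cache answering its CPU never enters
it, so the extra guard added nothing.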
@@ -526,7 +534,6 @@ Cache::promoteWholeLineWrites(PacketPtr pkt)
(pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
pkt->cmd = MemCmd::WriteLineReq;
DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
- assert(isTopLevel); // should only happen at L1 or I/O cache
}
}
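
The assert goes away simply because isTopLevel no longer exists; the
promotion condition itself (a write covering a full, block-aligned
line) is unchanged.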
@@ -696,7 +703,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// processing happens before any MSHR munging on the behalf of
// this request because this new Request will be the one stored
// into the MSHRs, not the original.
- if (pkt->cmd.isSWPrefetch() && isTopLevel) {
+ if (pkt->cmd.isSWPrefetch()) {
assert(needsResponse);
assert(pkt->req->hasPaddr());
assert(!pkt->req->isUncacheable());
@@ -905,7 +912,6 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
// the line in exclusive state, and invalidates all other
// copies
cmd = MemCmd::InvalidateReq;
- assert(isTopLevel);
} else {
// block is invalid
cmd = needsExclusive ? MemCmd::ReadExReq :
@@ -1034,17 +1040,12 @@ Cache::recvAtomic(PacketPtr pkt)
pkt->makeAtomicResponse();
pkt->copyError(bus_pkt);
} else if (pkt->cmd == MemCmd::InvalidateReq) {
- assert(!isTopLevel);
if (blk) {
// invalidate response to a cache that received
// an invalidate request
satisfyCpuSideRequest(pkt, blk);
}
} else if (pkt->cmd == MemCmd::WriteLineReq) {
- // invalidate response to the cache that
- // received the original write-line request
- assert(isTopLevel);
-
// note the use of pkt, not bus_pkt here.
// write-line request to the cache that promoted
@@ -1256,7 +1257,7 @@ Cache::recvTimingResp(PacketPtr pkt)
completion_time = pkt->headerDelay;
// Software prefetch handling for cache closest to core
- if (tgt_pkt->cmd.isSWPrefetch() && isTopLevel) {
+ if (tgt_pkt->cmd.isSWPrefetch()) {
// a software prefetch would have already been ack'd immediately
// with dummy data so the core would be able to retire it.
// this request completes right here, so we deallocate it.
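
The isTopLevel qualifier on the software-prefetch checks (here and in
recvTimingReq above) is likewise redundant: the first cache to see a
software prefetch re-issues it downstream as an ordinary request, so an
isSWPrefetch() command can only ever be observed by the cache closest
to the core.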
@@ -2148,7 +2149,7 @@ Cache::getNextMSHR()
bool
Cache::isCachedAbove(const PacketPtr pkt) const
{
- if (isTopLevel)
+ if (!forwardSnoops)
return false;
// Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
// Writeback snoops into upper level caches to check for copies of the
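
For isCachedAbove, forward_snoops now carries the information that
is_top_level used to encode: a cache that does not forward snoops has
no coherent cache above it, so nothing can be cached there. Configs
migrating across this change simply delete their is_top_level
assignments; no replacement parameter is needed.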