Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/cache/BaseCache.py  |  1 -
-rw-r--r--  src/mem/cache/base.cc       |  1 -
-rw-r--r--  src/mem/cache/base.hh       |  5 -----
-rw-r--r--  src/mem/cache/cache_impl.hh | 25 +++++++++++++------------
4 files changed, 13 insertions, 19 deletions
diff --git a/src/mem/cache/BaseCache.py b/src/mem/cache/BaseCache.py
index 4d6766456..d908430e5 100644
--- a/src/mem/cache/BaseCache.py
+++ b/src/mem/cache/BaseCache.py
@@ -64,7 +64,6 @@ class BaseCache(MemObject):
forward_snoops = Param.Bool(True,
"Forward snoops from mem side to cpu side")
- is_top_level = Param.Bool(False, "Is this cache at the top level (e.g. L1)")
is_read_only = Param.Bool(False, "Is this cache read only (e.g. inst)")
prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache")
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index b1c512079..1d0b9a3dd 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -78,7 +78,6 @@ BaseCache::BaseCache(const Params *p)
responseLatency(p->response_latency),
numTarget(p->tgts_per_mshr),
forwardSnoops(p->forward_snoops),
- isTopLevel(p->is_top_level),
isReadOnly(p->is_read_only),
blocked(0),
order(0),
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index d2cb11f33..6c87fad12 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -304,11 +304,6 @@ class BaseCache : public MemObject
/** Do we forward snoops from mem side port through to cpu side port? */
const bool forwardSnoops;
- /** Is this cache a toplevel cache (e.g. L1, I/O cache). If so we should
- * never try to forward ownership and similar optimizations to the cpu
- * side */
- const bool isTopLevel;
-
/**
* Is this cache read only, for example the instruction cache, or
* table-walker cache. A cache that is read only should never see
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 39206ca84..9dec54f45 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -179,7 +179,15 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
blk->trackLoadLocked(pkt);
}
pkt->setDataFromBlock(blk->data, blkSize);
- if (pkt->getSize() == blkSize) {
+ // determine if this read is from a (coherent) cache, or not
+ // by looking at the command type; we could potentially add a
+ // packet attribute such as 'FromCache' to make this check a
+ // bit cleaner
+ if (pkt->cmd == MemCmd::ReadExReq ||
+ pkt->cmd == MemCmd::ReadSharedReq ||
+ pkt->cmd == MemCmd::ReadCleanReq ||
+ pkt->cmd == MemCmd::SCUpgradeFailReq) {
+ assert(pkt->getSize() == blkSize);
// special handling for coherent block requests from
// upper-level caches
if (pkt->needsExclusive()) {
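[The comment added in this hunk suggests a cleaner alternative to enumerating command types at the call site: a packet attribute along the lines of 'FromCache'. A minimal sketch of what such a predicate could look like, mirroring exactly the four commands checked above (hypothetical helper, not part of this patch):

    // Hypothetical 'FromCache' predicate sketched from the comment in
    // the hunk above; the command list is copied from the check there.
    static bool
    isFromCache(const PacketPtr pkt)
    {
        // these read commands are only issued by a coherent cache on
        // its mem-side port, never directly by a CPU
        return pkt->cmd == MemCmd::ReadExReq ||
               pkt->cmd == MemCmd::ReadSharedReq ||
               pkt->cmd == MemCmd::ReadCleanReq ||
               pkt->cmd == MemCmd::SCUpgradeFailReq;
    }
]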
@@ -211,7 +219,7 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
if (blk->isDirty()) {
// special considerations if we're owner:
- if (!deferred_response && !isTopLevel) {
+ if (!deferred_response) {
// if we are responding immediately and can
// signal that we're transferring ownership
// along with exclusivity, do so
@@ -526,7 +534,6 @@ Cache::promoteWholeLineWrites(PacketPtr pkt)
(pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
pkt->cmd = MemCmd::WriteLineReq;
DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
- assert(isTopLevel); // should only happen at L1 or I/O cache
}
}
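[For the promotion above to fire, the write must cover exactly one block: same size as the block and zero offset within it. A self-contained illustration of the offset check, assuming the common 64-byte block size and that getOffset masks the low-order address bits:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t blkSize = 64;   // assumed block size
        // offset of an address within its block (low-order mask, the
        // usual computation for a power-of-two block size)
        auto offset = [&](uint64_t addr) { return addr & (blkSize - 1); };

        assert(offset(0x1000) == 0);   // block-aligned write: promoted
        assert(offset(0x1008) == 8);   // unaligned: stays a plain WriteReq
        return 0;
    }

With the assert removed, the promotion may now legally happen in any cache that sees a whole-line write, not only an L1 or I/O cache.]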
@@ -696,7 +703,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// processing happens before any MSHR munging on the behalf of
// this request because this new Request will be the one stored
// into the MSHRs, not the original.
- if (pkt->cmd.isSWPrefetch() && isTopLevel) {
+ if (pkt->cmd.isSWPrefetch()) {
assert(needsResponse);
assert(pkt->req->hasPaddr());
assert(!pkt->req->isUncacheable());
@@ -905,7 +912,6 @@ Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
// the line in exclusive state, and invalidates all other
// copies
cmd = MemCmd::InvalidateReq;
- assert(isTopLevel);
} else {
// block is invalid
cmd = needsExclusive ? MemCmd::ReadExReq :
@@ -1034,17 +1040,12 @@ Cache::recvAtomic(PacketPtr pkt)
pkt->makeAtomicResponse();
pkt->copyError(bus_pkt);
} else if (pkt->cmd == MemCmd::InvalidateReq) {
- assert(!isTopLevel);
if (blk) {
// invalidate response to a cache that received
// an invalidate request
satisfyCpuSideRequest(pkt, blk);
}
} else if (pkt->cmd == MemCmd::WriteLineReq) {
- // invalidate response to the cache that
- // received the original write-line request
- assert(isTopLevel);
-
// note the use of pkt, not bus_pkt here.
// write-line request to the cache that promoted
@@ -1256,7 +1257,7 @@ Cache::recvTimingResp(PacketPtr pkt)
completion_time = pkt->headerDelay;
// Software prefetch handling for cache closest to core
- if (tgt_pkt->cmd.isSWPrefetch() && isTopLevel) {
+ if (tgt_pkt->cmd.isSWPrefetch()) {
// a software prefetch would have already been ack'd immediately
// with dummy data so the core would be able to retire it.
// this request completes right here, so we deallocate it.
@@ -2148,7 +2149,7 @@ Cache::getNextMSHR()
bool
Cache::isCachedAbove(const PacketPtr pkt) const
{
- if (isTopLevel)
+ if (!forwardSnoops)
return false;
// Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
// Writeback snoops into upper level caches to check for copies of the
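[The final hunk recovers the removed flag from existing state: a cache that does not forward snoops to its cpu-side port cannot have a coherent cache above it, so forwardSnoops doubles as the "anyone above us?" test. Roughly, the predicate now reads as follows (a sketch; the tail of the function lies outside the hunk, and the snoop helper named here is a hypothetical stand-in):

    bool
    Cache::isCachedAbove(const PacketPtr pkt) const
    {
        // no snooping cpu-side port means nothing above can hold a copy
        if (!forwardSnoops)
            return false;

        // otherwise, mirror the HardPFReq flow: send CleanEvict and
        // Writeback snoops into the upper-level caches and report
        // whether any of them has a copy of the block
        return upperLevelsReportCached(pkt);  // hypothetical stand-in
    }
]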