author     Andreas Sandberg <Andreas.Sandberg@ARM.com>  2013-02-15 17:40:09 -0500
committer  Andreas Sandberg <Andreas.Sandberg@ARM.com>  2013-02-15 17:40:09 -0500
commit     b904bd5437ead0dfc2c4c0977f3d29d63299c601 (patch)
tree       f7d324fe5c806338534c5e41e9b251d2e62a3132 /src/mem/cache/cache_impl.hh
parent     1eec115c31395e2819c073a1859d75eb5933dac2 (diff)
download   gem5-b904bd5437ead0dfc2c4c0977f3d29d63299c601.tar.xz
sim: Add a system-global option to bypass caches
Virtualized CPUs and the fastmem mode of the atomic CPU require direct
access to physical memory. We currently require caches to be disabled
when using them to prevent chaos. This is not ideal when switching
between hardware virtualized CPUs and other CPU models as it would
require a configuration change on each switch. This changeset
introduces a new version of the atomic memory mode,
'atomic_noncaching', where memory accesses are inserted into the
memory system as atomic accesses, but bypass caches.

To make memory mode tests cleaner, the following methods are added to
the System class:

 * isAtomicMode() -- True if the memory mode is 'atomic' or
   'atomic_noncaching'.
 * isTimingMode() -- True if the memory mode is 'timing'.
 * bypassCaches() -- True if caches should be bypassed.

The old getMemoryMode() and setMemoryMode() methods should never be
used from the C++ world anymore.
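For illustration, a minimal sketch (in the spirit of the hunks below,
but not code from this changeset) of how a memory-side component that
keeps a System pointer can gate its caching logic on the new flag.
MyCache and handleCachedAccess are hypothetical names; system,
memSidePort, sendTimingReq() and bypassCaches() are taken from the
patch:

    // Hypothetical sketch -- not part of this patch.
    bool
    MyCache::recvTimingReq(PacketPtr pkt)
    {
        if (system->bypassCaches()) {
            // Caches are logically absent: hand the request straight
            // to the memory side without touching tags or MSHRs.
            return memSidePort->sendTimingReq(pkt);
        }
        // Normal cached path (hypothetical helper).
        return handleCachedAccess(pkt);
    }

The real timingAccess() below simply returns true after forwarding: in
bypass mode the cache is never blocked, which the assert added to
CpuSidePort::recvTimingReq() at the end of the diff enforces.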
Diffstat (limited to 'src/mem/cache/cache_impl.hh')
-rw-r--r--  src/mem/cache/cache_impl.hh  |  29
1 file changed, 29 insertions, 0 deletions
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index a1d945103..21c8e16d6 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -390,6 +390,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
         // must be cache-to-cache response from upper to lower level
         ForwardResponseRecord *rec =
             dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
+        assert(!system->bypassCaches());
 
         if (rec == NULL) {
             assert(pkt->cmd == MemCmd::HardPFResp);
@@ -409,6 +410,12 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
     assert(pkt->isRequest());
 
+    // Just forward the packet if caches are disabled.
+    if (system->bypassCaches()) {
+        memSidePort->sendTimingReq(pkt);
+        return true;
+    }
+
     if (pkt->memInhibitAsserted()) {
         DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
                 pkt->getAddr());
@@ -629,6 +636,10 @@ Cache<TagStore>::atomicAccess(PacketPtr pkt)
     // @TODO: make this a parameter
     bool last_level_cache = false;
 
+    // Forward the request if the system is in cache bypass mode.
+    if (system->bypassCaches())
+        return memSidePort->sendAtomic(pkt);
+
     if (pkt->memInhibitAsserted()) {
         assert(!pkt->req->isUncacheable());
         // have to invalidate ourselves and any lower caches even if
@@ -744,6 +755,17 @@ template<class TagStore>
 void
 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
 {
+    if (system->bypassCaches()) {
+        // Packets from the memory side are snoop requests and
+        // shouldn't happen in bypass mode.
+        assert(fromCpuSide);
+
+        // The cache should be flushed if we are in cache bypass mode,
+        // so we don't need to check if we need to update anything.
+        memSidePort->sendFunctional(pkt);
+        return;
+    }
+
     Addr blk_addr = blockAlign(pkt->getAddr());
     BlkType *blk = tags->findBlock(pkt->getAddr());
     MSHR *mshr = mshrQueue.findMatch(blk_addr);
@@ -1354,6 +1376,9 @@ template<class TagStore>
 void
 Cache<TagStore>::snoopTiming(PacketPtr pkt)
 {
+    // Snoops shouldn't happen when bypassing caches
+    assert(!system->bypassCaches());
+
     // Note that some deferred snoops don't have requests, since the
     // original access may have already completed
     if ((pkt->req && pkt->req->isUncacheable()) ||
@@ -1438,6 +1463,9 @@ template<class TagStore>
 Cycles
 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
 {
+    // Snoops shouldn't happen when bypassing caches
+    assert(!system->bypassCaches());
+
     if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
         // Can't get a hit on an uncacheable address
         // Revisit this for multi level coherence
@@ -1683,6 +1711,7 @@ Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
 {
     // always let inhibited requests through even if blocked
     if (!pkt->memInhibitAsserted() && blocked) {
+        assert(!cache->system->bypassCaches());
         DPRINTF(Cache,"Scheduling a retry while blocked\n");
         mustSendRetry = true;
         return false;