author    Nikos Nikoleris <nikos.nikoleris@arm.com> 2016-05-26 11:56:24 +0100
committer Nikos Nikoleris <nikos.nikoleris@arm.com> 2016-05-26 11:56:24 +0100
commit    90bf50b4c7249a4f5529245a82bc59370de468bd (patch)
tree      d239057e95ba840d516f0e87797dd59f6b4fc0e7
parent    f385adc8af744e7f6b78ff9adafd9c3ecf65ce68 (diff)
mem: fix the line length in the cache related classes
Change-Id: I6d1feb164a958dde0da87a1cd2698096112c4a82
Reviewed-by: Andreas Sandberg <andreas.sandberg@arm.com>
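The change is purely mechanical: statements that overran gem5's line-length limit are wrapped onto continuation lines, with no functional difference (the style guide's limit is roughly 79 columns; the exact figure is an assumption, not stated in the commit). A minimal, self-contained sketch of the wrapping pattern follows, using hypothetical stand-ins for gem5's Stats::Vector and System classes rather than the real ones:

#include <string>

// Hypothetical stand-ins, reduced to the two calls that recur in the diff
// below; the real gem5 Stats::Vector and System classes are far richer.
struct StatVector {
    void subname(int i, const std::string &name) { (void)i; (void)name; }
};

struct System {
    int maxMasters() const { return 4; }
    std::string getMasterName(int i) const
    {
        return "master" + std::to_string(i);
    }
};

void
regStatsSketch(StatVector &mshr_uncacheable_lat, const System *system)
{
    for (int i = 0; i < system->maxMasters(); i++) {
        // The over-long call is broken after the opening parenthesis and
        // the argument list continues on the next line, indented one step
        // further -- the same pattern the commit applies in base.cc and
        // cache.cc.
        mshr_uncacheable_lat.subname(
            i, system->getMasterName(i));
    }
}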
-rw-r--r--  src/mem/cache/base.cc        | 12
-rw-r--r--  src/mem/cache/base.hh        |  6
-rw-r--r--  src/mem/cache/cache.cc       | 43
-rw-r--r--  src/mem/cache/tags/fa_lru.hh |  4
4 files changed, 37 insertions(+), 28 deletions(-)
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 4d7d462fb..81f137ab9 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -599,7 +599,8 @@ BaseCache::regStats()
.flags(total | nozero | nonan)
;
for (int i = 0; i < system->maxMasters(); i++) {
- mshr_uncacheable_lat[access_idx].subname(i, system->getMasterName(i));
+ mshr_uncacheable_lat[access_idx].subname(
+ i, system->getMasterName(i));
}
}
@@ -699,7 +700,8 @@ BaseCache::regStats()
mshr_miss_latency[access_idx] / mshr_misses[access_idx];
for (int i = 0; i < system->maxMasters(); i++) {
- avgMshrMissLatency[access_idx].subname(i, system->getMasterName(i));
+ avgMshrMissLatency[access_idx].subname(
+ i, system->getMasterName(i));
}
}
@@ -737,7 +739,8 @@ BaseCache::regStats()
mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
for (int i = 0; i < system->maxMasters(); i++) {
- avgMshrUncacheableLatency[access_idx].subname(i, system->getMasterName(i));
+ avgMshrUncacheableLatency[access_idx].subname(
+ i, system->getMasterName(i));
}
}
@@ -746,7 +749,8 @@ BaseCache::regStats()
.desc("average overall mshr uncacheable latency")
.flags(total | nozero | nonan)
;
- overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable;
+ overallAvgMshrUncacheableLatency =
+ overallMshrUncacheableLatency / overallMshrUncacheable;
for (int i = 0; i < system->maxMasters(); i++) {
overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index ffff6f058..716969070 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -328,14 +328,16 @@ class BaseCache : public MemObject
* @{
*/
- /** Number of hits per thread for each type of command. @sa Packet::Command */
+ /** Number of hits per thread for each type of command.
+ @sa Packet::Command */
Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
/** Number of hits for demand accesses. */
Stats::Formula demandHits;
/** Number of hits for all accesses. */
Stats::Formula overallHits;
- /** Number of misses per thread for each type of command. @sa Packet::Command */
+ /** Number of misses per thread for each type of command.
+ @sa Packet::Command */
Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
/** Number of misses for demand accesses. */
Stats::Formula demandMisses;
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index a64fc0c9c..09080fb41 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -440,7 +440,8 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
// go to next level.
return false;
} else if ((blk != NULL) &&
- (pkt->needsWritable() ? blk->isWritable() : blk->isReadable())) {
+ (pkt->needsWritable() ? blk->isWritable() :
+ blk->isReadable())) {
// OK to satisfy access
incHitCount(pkt);
satisfyCpuSideRequest(pkt, blk);
@@ -709,7 +710,8 @@ Cache::recvTimingReq(PacketPtr pkt)
// hit (for all other request types)
- if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
+ if (prefetcher && (prefetchOnAccess ||
+ (blk && blk->wasPrefetched()))) {
if (blk)
blk->status &= ~BlkHWPrefetched;
@@ -808,9 +810,9 @@ Cache::recvTimingReq(PacketPtr pkt)
if (pkt->cmd == MemCmd::CleanEvict) {
pendingDelete.reset(pkt);
} else {
- DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx size %d\n",
- __func__, pkt->cmdString(), pkt->getAddr(),
- pkt->getSize());
+ DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx "
+ "size %d\n", __func__, pkt->cmdString(),
+ pkt->getAddr(), pkt->getSize());
assert(pkt->req->masterId() < system->maxMasters());
mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
@@ -833,12 +835,12 @@ Cache::recvTimingReq(PacketPtr pkt)
}
}
// We should call the prefetcher regardless if the request is
- // satisfied or not, regardless if the request is in the MSHR or
- // not. The request could be a ReadReq hit, but still not
+ // satisfied or not, regardless if the request is in the MSHR
+ // or not. The request could be a ReadReq hit, but still not
// satisfied (potentially because of a prior write to the same
// cache line). So, even when not satisfied, there is an MSHR
- // already allocated for this, we need to let the prefetcher know
- // about the request
+ // already allocated for this, we need to let the prefetcher
+ // know about the request
if (prefetcher) {
// Don't notify on SWPrefetch
if (!pkt->cmd.isSWPrefetch())
@@ -1054,8 +1056,8 @@ Cache::recvAtomic(PacketPtr pkt)
bool is_invalidate = bus_pkt->isInvalidate();
// We are now dealing with the response handling
- DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in state %i\n",
- bus_pkt->cmdString(), bus_pkt->getAddr(),
+ DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in "
+ "state %i\n", bus_pkt->cmdString(), bus_pkt->getAddr(),
bus_pkt->isSecure() ? "s" : "ns",
old_state);
@@ -1336,9 +1338,10 @@ Cache::recvTimingResp(PacketPtr pkt)
// Software prefetch handling for cache closest to core
if (tgt_pkt->cmd.isSWPrefetch()) {
- // a software prefetch would have already been ack'd immediately
- // with dummy data so the core would be able to retire it.
- // this request completes right here, so we deallocate it.
+ // a software prefetch would have already been ack'd
+ // immediately with dummy data so the core would be able to
+ // retire it. This request completes right here, so we
+ // deallocate it.
delete tgt_pkt->req;
delete tgt_pkt;
break; // skip response
@@ -1673,8 +1676,8 @@ Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
// allocation failed, block not inserted
return NULL;
} else {
- DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n",
- repl_addr, blk->isSecure() ? "s" : "ns",
+ DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
+ "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
addr, is_secure ? "s" : "ns",
blk->isDirty() ? "writeback" : "clean");
@@ -1978,8 +1981,8 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// above and in its own cache, a new MemCmd::ReadReq is created that
// downstream caches observe.
if (pkt->mustCheckAbove()) {
- DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from"
- " lower cache\n", pkt->getAddr(), pkt->cmdString());
+ DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
+ "from lower cache\n", pkt->getAddr(), pkt->cmdString());
pkt->setBlockCached();
return snoop_delay;
}
@@ -2487,8 +2490,8 @@ Cache::serialize(CheckpointOut &cp) const
if (dirty) {
warn("*** The cache still contains dirty data. ***\n");
warn(" Make sure to drain the system using the correct flags.\n");
- warn(" This checkpoint will not restore correctly and dirty data in "
- "the cache will be lost!\n");
+ warn(" This checkpoint will not restore correctly and dirty data "
+ " in the cache will be lost!\n");
}
// Since we don't checkpoint the data in the cache, any dirty data
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
index 2c34be08f..0dd402cea 100644
--- a/src/mem/cache/tags/fa_lru.hh
+++ b/src/mem/cache/tags/fa_lru.hh
@@ -177,8 +177,8 @@ public:
void invalidate(CacheBlk *blk) override;
/**
- * Access block and update replacement data. May not succeed, in which case
- * NULL pointer is returned. This has all the implications of a cache
+ * Access block and update replacement data. May not succeed, in which
+ * case NULL pointer is returned. This has all the implications of a cache
* access and should only be used as such.
* Returns the access latency and inCache flags as a side effect.
* @param addr The address to look for.