-rw-r--r--  src/mem/cache/base.cc                  2
-rw-r--r--  src/mem/cache/cache.cc                42
-rw-r--r--  src/mem/cache/cache.hh                 6
-rw-r--r--  src/mem/cache/mshr.cc                  6
-rw-r--r--  src/mem/cache/prefetch/queued.cc       4
-rw-r--r--  src/mem/cache/queue.hh                 2
-rw-r--r--  src/mem/cache/tags/base_set_assoc.hh   8
-rw-r--r--  src/mem/cache/tags/cacheset.hh         2
-rw-r--r--  src/mem/cache/tags/fa_lru.cc          16
-rw-r--r--  src/mem/cache/tags/fa_lru.hh           4
-rw-r--r--  src/mem/cache/tags/lru.cc              4
-rw-r--r--  src/mem/cache/write_queue_entry.cc     2
12 files changed, 49 insertions, 49 deletions
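
The patch below is a mechanical cleanup: every remaining NULL in the cache code is replaced with the C++11 nullptr keyword. As a minimal stand-alone sketch (not part of the patch), the usual motivation is that nullptr has its own type, std::nullptr_t, so it behaves unambiguously in overload resolution, whereas the macro NULL is just an integer constant:

    #include <cstddef>
    #include <iostream>

    void report(int)         { std::cout << "called int overload\n"; }
    void report(const char*) { std::cout << "called pointer overload\n"; }

    int main()
    {
        report(nullptr);   // unambiguously selects the pointer overload
        report(0);         // selects the int overload, even though 0 is also a
                           // null pointer constant -- the trap NULL can fall into
        // int n = nullptr;  // would not compile: nullptr is not an integer
        return 0;
    }

Because nullptr never converts to an integer, the rename also lets the compiler catch accidental pointer/integer mix-ups that NULL would have silently allowed.
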
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 81f137ab9..c11208021 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -80,7 +80,7 @@ BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
isReadOnly(p->is_read_only),
blocked(0),
order(0),
- noTargetMSHR(NULL),
+ noTargetMSHR(nullptr),
missCount(p->max_miss_count),
addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
system(p->system)
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 09080fb41..84f81e4ab 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -322,7 +322,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
old_blk->invalidate();
}
- blk = NULL;
+ blk = nullptr;
// lookupLatency is the latency in case the request is uncacheable.
lat = lookupLatency;
return false;
@@ -394,10 +394,10 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
return true;
}
- if (blk == NULL) {
+ if (blk == nullptr) {
// need to do a replacement
blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
- if (blk == NULL) {
+ if (blk == nullptr) {
// no replaceable block available: give up, fwd to next level.
incMissCount(pkt);
return false;
@@ -427,7 +427,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
incHitCount(pkt);
return true;
} else if (pkt->cmd == MemCmd::CleanEvict) {
- if (blk != NULL) {
+ if (blk != nullptr) {
// Found the block in the tags, need to stop CleanEvict from
// propagating further down the hierarchy. Returning true will
// treat the CleanEvict like a satisfied write request and delete
@@ -439,7 +439,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
// like a Writeback which could not find a replaceable block so has to
// go to next level.
return false;
- } else if ((blk != NULL) &&
+ } else if ((blk != nullptr) &&
(pkt->needsWritable() ? blk->isWritable() :
blk->isReadable())) {
// OK to satisfy access
@@ -448,12 +448,12 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
return true;
}
- // Can't satisfy access normally... either no block (blk == NULL)
+ // Can't satisfy access normally... either no block (blk == nullptr)
// or have block but need writable
incMissCount(pkt);
- if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
+ if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) {
// complete miss on store conditional... just give up now
pkt->req->setExtraData(0);
return true;
@@ -674,7 +674,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// We use lookupLatency here because it is used to specify the latency
// to access.
Cycles lat = lookupLatency;
- CacheBlk *blk = NULL;
+ CacheBlk *blk = nullptr;
bool satisfied = false;
{
PacketList writebacks;
@@ -1013,7 +1013,7 @@ Cache::recvAtomic(PacketPtr pkt)
// writebacks... that would mean that someone used an atomic
// access in timing mode
- CacheBlk *blk = NULL;
+ CacheBlk *blk = nullptr;
PacketList writebacks;
bool satisfied = access(pkt, blk, lat, writebacks);
@@ -1035,7 +1035,7 @@ Cache::recvAtomic(PacketPtr pkt)
PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
- bool is_forward = (bus_pkt == NULL);
+ bool is_forward = (bus_pkt == nullptr);
if (is_forward) {
// just forwarding the same request to the next level
@@ -1275,7 +1275,7 @@ Cache::recvTimingResp(PacketPtr pkt)
if (mshr == noTargetMSHR) {
// we always clear at least one target
clearBlocked(Blocked_NoTargets);
- noTargetMSHR = NULL;
+ noTargetMSHR = nullptr;
}
// Initial target is used just for stats
@@ -1315,7 +1315,7 @@ Cache::recvTimingResp(PacketPtr pkt)
pkt->getAddr());
blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
- assert(blk != NULL);
+ assert(blk != nullptr);
}
// allow invalidation responses originating from write-line
@@ -1360,7 +1360,7 @@ Cache::recvTimingResp(PacketPtr pkt)
mshr->promoteWritable();
// NB: we use the original packet here and not the response!
blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
- assert(blk != NULL);
+ assert(blk != nullptr);
// treat as a fill, and discard the invalidation
// response
@@ -1660,7 +1660,7 @@ Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
CacheBlk *blk = tags->findVictim(addr);
- // It is valid to return NULL if there is no victim
+ // It is valid to return nullptr if there is no victim
if (!blk)
return nullptr;
@@ -1674,7 +1674,7 @@ Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
assert(repl_mshr->needsWritable());
// too hard to replace block with transient state
// allocation failed, block not inserted
- return NULL;
+ return nullptr;
} else {
DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
"(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
@@ -1726,7 +1726,7 @@ Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
assert(addr == blockAlign(addr));
assert(!writeBuffer.findMatch(addr, is_secure));
- if (blk == NULL) {
+ if (blk == nullptr) {
// better have read new data...
assert(pkt->hasData());
@@ -1737,9 +1737,9 @@ Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
// need to do a replacement if allocating, otherwise we stick
// with the temporary storage
- blk = allocate ? allocateBlock(addr, is_secure, writebacks) : NULL;
+ blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;
- if (blk == NULL) {
+ if (blk == nullptr) {
// No replaceable block or a mostly exclusive
// cache... just use temporary storage to complete the
// current request and then get rid of it
@@ -2309,7 +2309,7 @@ Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
// prefetch request because prefetch requests need an MSHR and may
// generate a snoop response.
assert(pkt->isEviction());
- snoop_pkt.senderState = NULL;
+ snoop_pkt.senderState = nullptr;
cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
// Writeback/CleanEvict snoops do not generate a snoop response.
assert(!(snoop_pkt.cacheResponding()));
@@ -2353,7 +2353,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
// we should never have hardware prefetches to allocated
// blocks
- assert(blk == NULL);
+ assert(blk == nullptr);
// We need to check the caches above us to verify that
// they don't have a copy of this block in the dirty state
@@ -2415,7 +2415,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
// MSHR request, proceed to get the packet to send downstream
PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());
- mshr->isForward = (pkt == NULL);
+ mshr->isForward = (pkt == nullptr);
if (mshr->isForward) {
// not a cache block request, but a response is expected
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index d0bec127f..e1c99ea04 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -290,7 +290,7 @@ class Cache : public BaseCache
* Find a block frame for new block at address addr targeting the
* given security space, assuming that the block is not currently
* in the cache. Append writebacks if any to provided packet
- * list. Return free block frame. May return NULL if there are
+ * list. Return free block frame. May return nullptr if there are
* no replaceable blocks at the moment.
*/
CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
@@ -464,10 +464,10 @@ class Cache : public BaseCache
* given parameters.
* @param cpu_pkt The miss that needs to be satisfied.
* @param blk The block currently in the cache corresponding to
- * cpu_pkt (NULL if none).
+ * cpu_pkt (nullptr if none).
* @param needsWritable Indicates that the block must be writable
* even if the request in cpu_pkt doesn't indicate that.
- * @return A new Packet containing the request, or NULL if the
+ * @return A new Packet containing the request, or nullptr if the
* current request in cpu_pkt should just be forwarded on.
*/
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 14f381e91..5c4481e02 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -95,7 +95,7 @@ MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
// an MSHR entry. If we do, set the downstreamPending
// flag. Otherwise, do nothing.
MSHR *mshr = pkt->findNextSenderState<MSHR>();
- if (mshr != NULL) {
+ if (mshr != nullptr) {
assert(!mshr->downstreamPending);
mshr->downstreamPending = true;
} else {
@@ -166,7 +166,7 @@ MSHR::TargetList::clearDownstreamPending()
// downstreamPending flag in all caches this packet has
// passed through.
MSHR *mshr = t.pkt->findNextSenderState<MSHR>();
- if (mshr != NULL) {
+ if (mshr != nullptr) {
mshr->clearDownstreamPending();
}
}
@@ -491,7 +491,7 @@ MSHR::checkFunctional(PacketPtr pkt)
// For other requests, we iterate over the individual targets
// since that's where the actual data lies.
if (pkt->isPrint()) {
- pkt->checkFunctional(this, blkAddr, isSecure, blkSize, NULL);
+ pkt->checkFunctional(this, blkAddr, isSecure, blkSize, nullptr);
return false;
} else {
return (targets.checkFunctional(pkt) ||
diff --git a/src/mem/cache/prefetch/queued.cc b/src/mem/cache/prefetch/queued.cc
index 015609bab..863d36900 100644
--- a/src/mem/cache/prefetch/queued.cc
+++ b/src/mem/cache/prefetch/queued.cc
@@ -117,14 +117,14 @@ QueuedPrefetcher::getPacket()
if (pfq.empty()) {
DPRINTF(HWPrefetch, "No hardware prefetches available.\n");
- return NULL;
+ return nullptr;
}
PacketPtr pkt = pfq.begin()->pkt;
pfq.pop_front();
pfIssued++;
- assert(pkt != NULL);
+ assert(pkt != nullptr);
DPRINTF(HWPrefetch, "Generating prefetch for %#x.\n", pkt->getAddr());
return pkt;
}
diff --git a/src/mem/cache/queue.hh b/src/mem/cache/queue.hh
index fb3e73608..f6941e64a 100644
--- a/src/mem/cache/queue.hh
+++ b/src/mem/cache/queue.hh
@@ -206,7 +206,7 @@ class Queue : public Drainable
Entry* getNext() const
{
if (readyList.empty() || readyList.front()->readyTime > curTick()) {
- return NULL;
+ return nullptr;
}
return readyList.front();
}
diff --git a/src/mem/cache/tags/base_set_assoc.hh b/src/mem/cache/tags/base_set_assoc.hh
index 910d44b36..053274c53 100644
--- a/src/mem/cache/tags/base_set_assoc.hh
+++ b/src/mem/cache/tags/base_set_assoc.hh
@@ -193,7 +193,7 @@ public:
/**
* Access block and update replacement data. May not succeed, in which case
- * NULL pointer is returned. This has all the implications of a cache
+ * nullptr is returned. This has all the implications of a cache
* access and should only be used as such. Returns the access latency as a
* side effect.
* @param addr The address to find.
@@ -215,14 +215,14 @@ public:
// a hit. Sequential access with a miss doesn't access data.
tagAccesses += allocAssoc;
if (sequentialAccess) {
- if (blk != NULL) {
+ if (blk != nullptr) {
dataAccesses += 1;
}
} else {
dataAccesses += allocAssoc;
}
- if (blk != NULL) {
+ if (blk != nullptr) {
if (blk->whenReady > curTick()
&& cache->ticksToCycles(blk->whenReady - curTick())
> accessLatency) {
@@ -253,7 +253,7 @@ public:
*/
CacheBlk* findVictim(Addr addr) override
{
- BlkType *blk = NULL;
+ BlkType *blk = nullptr;
int set = extractSet(addr);
// prefer to evict an invalid block
diff --git a/src/mem/cache/tags/cacheset.hh b/src/mem/cache/tags/cacheset.hh
index 71a69b3dc..8bf0989f7 100644
--- a/src/mem/cache/tags/cacheset.hh
+++ b/src/mem/cache/tags/cacheset.hh
@@ -104,7 +104,7 @@ CacheSet<Blktype>::findBlk(Addr tag, bool is_secure, int& way_id) const
return blks[i];
}
}
- return NULL;
+ return nullptr;
}
template <class Blktype>
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index c9531d12c..ecac88afa 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -79,12 +79,12 @@ FALRU::FALRU(const Params *p)
head = &(blks[0]);
tail = &(blks[numBlocks-1]);
- head->prev = NULL;
+ head->prev = nullptr;
head->next = &(blks[1]);
head->inCache = cacheMask;
tail->prev = &(blks[numBlocks-2]);
- tail->next = NULL;
+ tail->next = nullptr;
tail->inCache = 0;
unsigned index = (1 << 17) / blkSize;
@@ -159,7 +159,7 @@ FALRU::hashLookup(Addr addr) const
if (iter != tagHash.end()) {
return (*iter).second;
}
- return NULL;
+ return nullptr;
}
void
@@ -199,7 +199,7 @@ FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
moveToHead(blk);
}
} else {
- blk = NULL;
+ blk = nullptr;
for (unsigned i = 0; i <= numCaches; ++i) {
misses[i]++;
}
@@ -223,7 +223,7 @@ FALRU::findBlock(Addr addr, bool is_secure) const
if (blk && blk->isValid()) {
assert(blk->tag == blkAddr);
} else {
- blk = NULL;
+ blk = nullptr;
}
return blk;
}
@@ -277,15 +277,15 @@ FALRU::moveToHead(FALRUBlk *blk)
blk->inCache = cacheMask;
if (blk != head) {
if (blk == tail){
- assert(blk->next == NULL);
+ assert(blk->next == nullptr);
tail = blk->prev;
- tail->next = NULL;
+ tail->next = nullptr;
} else {
blk->prev->next = blk->next;
blk->next->prev = blk->prev;
}
blk->next = head;
- blk->prev = NULL;
+ blk->prev = nullptr;
head->prev = blk;
head = blk;
}
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
index 0dd402cea..ecfe467ef 100644
--- a/src/mem/cache/tags/fa_lru.hh
+++ b/src/mem/cache/tags/fa_lru.hh
@@ -178,8 +178,8 @@ public:
/**
* Access block and update replacement data. May not succeed, in which
- * case NULL pointer is returned. This has all the implications of a cache
- * access and should only be used as such.
+ * case nullptr pointer is returned. This has all the implications of a
+ * cache access and should only be used as such.
* Returns the access latency and inCache flags as a side effect.
* @param addr The address to look for.
* @param is_secure True if the target memory space is secure.
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc
index 31423f994..a85b63889 100644
--- a/src/mem/cache/tags/lru.cc
+++ b/src/mem/cache/tags/lru.cc
@@ -59,7 +59,7 @@ LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{
CacheBlk *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
- if (blk != NULL) {
+ if (blk != nullptr) {
// move this block to head of the MRU list
sets[blk->set].moveToHead(blk);
DPRINTF(CacheRepl, "set %x: moving blk %x (%s) to MRU\n",
@@ -75,7 +75,7 @@ LRU::findVictim(Addr addr)
{
int set = extractSet(addr);
// grab a replacement candidate
- BlkType *blk = NULL;
+ BlkType *blk = nullptr;
for (int i = assoc - 1; i >= 0; i--) {
BlkType *b = sets[set].blks[i];
if (b->way < allocAssoc) {
diff --git a/src/mem/cache/write_queue_entry.cc b/src/mem/cache/write_queue_entry.cc
index e54fed7a4..283c4a862 100644
--- a/src/mem/cache/write_queue_entry.cc
+++ b/src/mem/cache/write_queue_entry.cc
@@ -132,7 +132,7 @@ WriteQueueEntry::checkFunctional(PacketPtr pkt)
// entity. For other requests, we iterate over the individual
// targets since that's where the actual data lies.
if (pkt->isPrint()) {
- pkt->checkFunctional(this, blkAddr, isSecure, blkSize, NULL);
+ pkt->checkFunctional(this, blkAddr, isSecure, blkSize, nullptr);
return false;
} else {
return targets.checkFunctional(pkt);
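
Beyond the pure rename, the hunks above all follow the same convention: lookup and allocation helpers signal failure by returning nullptr, and every caller tests for it before touching the block. A stand-alone sketch of that calling pattern, using hypothetical names (findVictimBlock is an illustration, not a gem5 API):

    #include <cstdint>
    #include <iostream>

    struct Block { uint64_t tag; bool valid; };

    // Hypothetical stand-in for a victim lookup: returns nullptr when no
    // replaceable frame exists, mirroring the contract documented in cache.hh.
    Block* findVictimBlock(bool have_replaceable, Block& frame)
    {
        return have_replaceable ? &frame : nullptr;
    }

    int main()
    {
        Block frame{0x1000, true};

        for (bool have_replaceable : {true, false}) {
            Block* blk = findVictimBlock(have_replaceable, frame);
            if (blk == nullptr) {
                // Analogous to the nullptr checks in Cache::access() and
                // handleFill(): give up on allocation and fall back to
                // forwarding the request or using temporary storage.
                std::cout << "no victim: forward request downstream\n";
            } else {
                std::cout << "victim tag 0x" << std::hex << blk->tag << "\n";
            }
        }
        return 0;
    }
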