From 72cfed41641bbea2ea3dc78958ed3b1e2c27bbf9 Mon Sep 17 00:00:00 2001
From: Steve Reinhardt
Date: Sat, 26 Sep 2009 10:50:50 -0700
Subject: Force prefetches to check cache and MSHRs immediately prior to issue.

This prevents redundant prefetches from being issued, solving the
occasional 'needsExclusive && !blk->isWritable()' assertion failure in
cache_impl.hh that several people have run into.

Eliminates "prefetch_cache_check_push" flag, neither setting of which
really solved the problem.
---
 src/mem/cache/BaseCache.py     |  2 --
 src/mem/cache/cache_impl.hh    | 13 ++++++++-----
 src/mem/cache/prefetch/base.cc | 16 ----------------
 src/mem/cache/prefetch/base.hh |  4 ----
 4 files changed, 8 insertions(+), 27 deletions(-)

(limited to 'src/mem/cache')

diff --git a/src/mem/cache/BaseCache.py b/src/mem/cache/BaseCache.py
index bdef07cb4..5ded05400 100644
--- a/src/mem/cache/BaseCache.py
+++ b/src/mem/cache/BaseCache.py
@@ -68,8 +68,6 @@ class BaseCache(MemObject):
         "Latency of the prefetcher")
     prefetch_policy = Param.Prefetch('none',
         "Type of prefetcher to use")
-    prefetch_cache_check_push = Param.Bool(True,
-        "Check if in cache on push or pop of prefetch queue")
     prefetch_use_cpu_id = Param.Bool(True,
         "Use the CPU ID to separate calculations of prefetches")
     prefetch_data_accesses_only = Param.Bool(False,
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 80b7c545c..d8630c1f5 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1301,11 +1301,14 @@ Cache<TagStore>::getNextMSHR()
         // If we have a miss queue slot, we can try a prefetch
         PacketPtr pkt = prefetcher->getPacket();
         if (pkt) {
-            // Update statistic on number of prefetches issued
-            // (hwpf_mshr_misses)
-            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
-            // Don't request bus, since we already have it
-            return allocateMissBuffer(pkt, curTick, false);
+            Addr pf_addr = blockAlign(pkt->getAddr());
+            if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
+                // Update statistic on number of prefetches issued
+                // (hwpf_mshr_misses)
+                mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
+                // Don't request bus, since we already have it
+                return allocateMissBuffer(pkt, curTick, false);
+            }
         }
     }
 
diff --git a/src/mem/cache/prefetch/base.cc b/src/mem/cache/prefetch/base.cc
index f0e244a09..ad7a0c882 100644
--- a/src/mem/cache/prefetch/base.cc
+++ b/src/mem/cache/prefetch/base.cc
@@ -45,7 +45,6 @@ BasePrefetcher::BasePrefetcher(const BaseCacheParams *p)
     : size(p->prefetcher_size), pageStop(!p->prefetch_past_page),
       serialSquash(p->prefetch_serial_squash),
-      cacheCheckPush(p->prefetch_cache_check_push),
       onlyData(p->prefetch_data_accesses_only)
 {
 }
 
@@ -143,9 +142,6 @@ BasePrefetcher::getPacket()
     do {
         pkt = *pf.begin();
         pf.pop_front();
-        if (!cacheCheckPush) {
-            keep_trying = cache->inCache(pkt->getAddr());
-        }
 
         if (keep_trying) {
             DPRINTF(HWPrefetch, "addr 0x%x in cache, skipping\n",
@@ -226,18 +222,6 @@ BasePrefetcher::notify(PacketPtr &pkt, Tick time)
                     "inserting into prefetch queue with delay %d time %d\n",
                     addr, *delayIter, time);
 
-            // Check if it is already in the cache
-            if (cacheCheckPush && cache->inCache(addr)) {
-                DPRINTF(HWPrefetch, "Prefetch addr already in cache\n");
-                continue;
-            }
-
-            // Check if it is already in the miss_queue
-            if (cache->inMissQueue(addr)) {
-                DPRINTF(HWPrefetch, "Prefetch addr already in miss queue\n");
-                continue;
-            }
-
             // Check if it is already in the pf buffer
             if (inPrefetch(addr) != pf.end()) {
                 pfBufferHit++;
diff --git a/src/mem/cache/prefetch/base.hh b/src/mem/cache/prefetch/base.hh
index b5f33a455..e3c0cbf16 100644
--- a/src/mem/cache/prefetch/base.hh
+++ b/src/mem/cache/prefetch/base.hh
@@ -68,10 +68,6 @@ class BasePrefetcher
     /** Do we remove prefetches with later times than a new miss.*/
     bool serialSquash;
 
-    /** Do we check if it is in the cache when inserting into buffer,
-        or removing.*/
-    bool cacheCheckPush;
-
     /** Do we prefetch on only data reads, or on inst reads as well. */
     bool onlyData;
 
--
cgit v1.2.3
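
For readers skimming the patch, the key change is the guard added in getNextMSHR(): a prefetch popped from the prefetcher is only turned into an MSHR if its block-aligned address misses both the tag store and the MSHR queue, so the push-time checks (and the cacheCheckPush flag) become unnecessary. Below is a minimal standalone sketch of that filtering idea; PrefetchFilter and its members are illustrative stand-ins, not gem5's actual classes.

```cpp
// Illustrative sketch only: standalone stand-ins for the real gem5 classes
// (the tag store and MSHR queue are modeled as simple sets of block addresses).
#include <cstdint>
#include <iostream>
#include <unordered_set>

using Addr = std::uint64_t;

struct PrefetchFilter {
    Addr blkSize = 64;                     // assumed cache line size
    std::unordered_set<Addr> cachedBlocks; // block addresses already in the cache
    std::unordered_set<Addr> mshrBlocks;   // block addresses with an outstanding MSHR

    Addr blockAlign(Addr addr) const { return addr & ~(blkSize - 1); }

    // Mirrors the new guard in getNextMSHR(): issue the prefetch only if the
    // block is neither present in the cache nor already being fetched.
    bool shouldIssue(Addr addr) const {
        const Addr pf_addr = blockAlign(addr);
        return !cachedBlocks.count(pf_addr) && !mshrBlocks.count(pf_addr);
    }
};

int main() {
    PrefetchFilter filter;
    filter.cachedBlocks.insert(0x1000);  // block already resident
    filter.mshrBlocks.insert(0x2000);    // block already being fetched

    std::cout << filter.shouldIssue(0x1008) << '\n';  // 0: hits in the cache
    std::cout << filter.shouldIssue(0x2010) << '\n';  // 0: matches an MSHR
    std::cout << filter.shouldIssue(0x3000) << '\n';  // 1: safe to issue
}
```

Checking at issue time rather than at queue-push time matters because the cache and MSHR contents can change while a prefetch sits in the queue; filtering just before allocation is what removes the redundant prefetches behind the assertion failure.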