summaryrefslogtreecommitdiff
path: root/src/mem
diff options
context:
space:
mode:
authorMitch Hayenga <mitch.hayenga+gem5@gmail.com>2014-01-29 23:21:26 -0600
committerMitch Hayenga <mitch.hayenga+gem5@gmail.com>2014-01-29 23:21:26 -0600
commit771c864bf407b57bf91896f38e989e8a36cd9cd1 (patch)
treecc0f63f933915a9e58ec2420535c67667af5a180 /src/mem
parent95735e10e7ea85320ee39c15a4132eece8417af4 (diff)
downloadgem5-771c864bf407b57bf91896f38e989e8a36cd9cd1.tar.xz
mem: Allowed tagged instruction prefetching in stride prefetcher
For systems with a tightly coupled L2, a stride-based prefetcher may observe access requests from both instruction and data L1 caches. However, the PC address of an instruction miss gives no relevant training information to the stride-based prefetcher (there is no stride to train). In these cases, it's better if the L2 stride prefetcher simply reverted back to a simple N-block ahead prefetcher. This patch enables this option. Committed by: Nilay Vaish <nilay@cs.wisc.edu>
Diffstat (limited to 'src/mem')
-rw-r--r--src/mem/cache/prefetch/Prefetcher.py2
-rw-r--r--src/mem/cache/prefetch/stride.cc17
-rw-r--r--src/mem/cache/prefetch/stride.hh4
3 files changed, 22 insertions, 1 deletions
diff --git a/src/mem/cache/prefetch/Prefetcher.py b/src/mem/cache/prefetch/Prefetcher.py
index 7d7aeed32..c4b6b8845 100644
--- a/src/mem/cache/prefetch/Prefetcher.py
+++ b/src/mem/cache/prefetch/Prefetcher.py
@@ -65,6 +65,8 @@ class BasePrefetcher(ClockedObject):
"Only prefetch on read requests (write requests ignored)")
on_prefetch = Param.Bool(True,
"Let lower cache prefetcher train on prefetch requests")
+ inst_tagged = Param.Bool(True,
+ "Perform a tagged prefetch for instruction fetches always")
sys = Param.System(Parent.any, "System this device belongs to")
class GHBPrefetcher(BasePrefetcher):
diff --git a/src/mem/cache/prefetch/stride.cc b/src/mem/cache/prefetch/stride.cc
index a7abf4809..c4cf2023a 100644
--- a/src/mem/cache/prefetch/stride.cc
+++ b/src/mem/cache/prefetch/stride.cc
@@ -66,6 +66,23 @@ StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
assert(master_id < Max_Contexts);
std::list<StrideEntry*> &tab = table[master_id];
+ // Revert to simple N-block ahead prefetch for instruction fetches
+ if (instTagged && pkt->req->isInstFetch()) {
+ for (int d = 1; d <= degree; d++) {
+ Addr new_addr = data_addr + d * blkSize;
+ if (pageStop && !samePage(data_addr, new_addr)) {
+ // Spanned the page, so now stop
+ pfSpanPage += degree - d + 1;
+ return;
+ }
+ DPRINTF(HWPrefetch, "queuing prefetch to %x @ %d\n",
+ new_addr, latency);
+ addresses.push_back(new_addr);
+ delays.push_back(latency);
+ }
+ return;
+ }
+
/* Scan Table for instAddr Match */
std::list<StrideEntry*>::iterator iter;
for (iter = tab.begin(); iter != tab.end(); iter++) {
diff --git a/src/mem/cache/prefetch/stride.hh b/src/mem/cache/prefetch/stride.hh
index b02d97d56..0e31984f9 100644
--- a/src/mem/cache/prefetch/stride.hh
+++ b/src/mem/cache/prefetch/stride.hh
@@ -76,10 +76,12 @@ class StridePrefetcher : public BasePrefetcher
std::list<StrideEntry*> table[Max_Contexts];
+ bool instTagged;
+
public:
StridePrefetcher(const Params *p)
- : BasePrefetcher(p)
+ : BasePrefetcher(p), instTagged(p->inst_tagged)
{
}