summaryrefslogtreecommitdiff
path: root/src/mem/cache/prefetch/pif.cc
diff options
context:
space:
mode:
authorIvan Pizarro <ivan.pizarro@metempsy.com>2019-02-25 14:30:55 +0100
committerJavier Bueno Hedo <javier.bueno@metempsy.com>2019-04-02 10:38:39 +0000
commit4628d87e3abbcad0c5a95b6a4562b8ac8c6f4661 (patch)
treebfc48023ee80f5b42b516e1c03cfb22e8e327706 /src/mem/cache/prefetch/pif.cc
parent283e092eda0b739a3c28bfa6293822e57ca67db6 (diff)
downloadgem5-4628d87e3abbcad0c5a95b6a4562b8ac8c6f4661.tar.xz
mem-cache: Proactive Instruction Fetch Implementation
Ferdman, M., Kaynak, C., & Falsafi, B. (2011, December). Proactive instruction fetch. In Proceedings of the 44th Annual IEEE/ACM International Symposium on Microarchitecture (pp. 152-162). ACM. Change-Id: I38c3ab30a94ab279f03e3d5936ce8ed118310c0e Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/16968 Reviewed-by: Nikos Nikoleris <nikos.nikoleris@arm.com> Reviewed-by: Daniel Carvalho <odanrc@yahoo.com.br> Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com>
Diffstat (limited to 'src/mem/cache/prefetch/pif.cc')
-rw-r--r--src/mem/cache/prefetch/pif.cc259
1 files changed, 259 insertions, 0 deletions
diff --git a/src/mem/cache/prefetch/pif.cc b/src/mem/cache/prefetch/pif.cc
new file mode 100644
index 000000000..04064d497
--- /dev/null
+++ b/src/mem/cache/prefetch/pif.cc
@@ -0,0 +1,259 @@
+/**
+ * Copyright (c) 2019 Metempsy Technology Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ivan Pizarro
+ */
+
+#include "mem/cache/prefetch/pif.hh"
+
+#include <cmath>
+#include <utility>
+
+#include "debug/HWPrefetch.hh"
+#include "mem/cache/prefetch/associative_set_impl.hh"
+#include "params/PIFPrefetcher.hh"
+
// Construct the Proactive Instruction Fetch prefetcher from its Python
// parameters. All structures start empty; the history buffer is a fixed-size
// circular buffer indexed by historyBufferTail.
PIFPrefetcher::PIFPrefetcher(const PIFPrefetcherParams *p)
    : QueuedPrefetcher(p),
      precSize(p->prec_spatial_region_bits),   // bits tracking blocks before the trigger
      succSize(p->succ_spatial_region_bits),   // bits tracking blocks after the trigger
      maxCompactorEntries(p->compactor_entries),
      maxStreamAddressBufferEntries(p->stream_address_buffer_entries),
      historyBuffer(p->history_buffer_size),
      historyBufferTail(0),
      index(p->index_assoc, p->index_entries, p->index_indexing_policy,
            p->index_replacement_policy),
      streamAddressBuffer(), listenersPC()
{
}
+
+PIFPrefetcher::CompactorEntry::CompactorEntry(Addr addr,
+ unsigned int prec_size, unsigned int succ_size)
+{
+ trigger = addr;
+ prec.resize(prec_size, false);
+ succ.resize(succ_size, false);
+}
+
+unsigned int
+PIFPrefetcher::CompactorEntry::distanceFromTrigger(Addr target,
+ unsigned int log_blk_size) const {
+ const Addr target_blk = target >> log_blk_size;
+ const Addr trigger_blk = trigger >> log_blk_size;
+
+ return std::abs(target_blk - trigger_blk);
+}
+
+bool
+PIFPrefetcher::CompactorEntry::inSameSpatialRegion(Addr pc,
+ unsigned int log_blk_size, bool update)
+{
+ unsigned int blk_distance = distanceFromTrigger(pc, log_blk_size);
+
+ bool hit = (pc > trigger) ?
+ (succ.size() >= blk_distance) : (prec.size() >= blk_distance);
+ if (hit && update) {
+ if (pc > trigger) {
+ succ[blk_distance - 1] = true;
+ } else if (pc < trigger) {
+ prec[blk_distance - 1] = true;
+ }
+ }
+ return hit;
+}
+
+bool
+PIFPrefetcher::CompactorEntry::hasAddress(Addr target,
+ unsigned int log_blk_size) const
+{
+ unsigned int blk_distance = distanceFromTrigger(target, log_blk_size);
+ bool hit = false;
+ if (target > trigger) {
+ hit = blk_distance <= succ.size() && succ[blk_distance - 1];
+ } else if (target < trigger) {
+ hit = blk_distance <= prec.size() && succ[blk_distance - 1];
+ } else {
+ hit = true;
+ }
+ return hit;
+}
+
+void
+PIFPrefetcher::CompactorEntry::getPredictedAddresses(unsigned int log_blk_size,
+ std::vector<AddrPriority> &addresses) const
+{
+ // Calculate the addresses of the instruction blocks that are encoded
+ // by the bit vector and issue prefetch requests for these addresses.
+ // Predictions are made by traversing the bit vector from left to right
+ // as this typically predicts the accesses in the order they will be
+ // issued in the core.
+ const Addr trigger_blk = trigger >> log_blk_size;
+ for (int i = prec.size()-1; i >= 0; i--) {
+ // Address from the preceding blocks to issue a prefetch
+ if (prec[i]) {
+ const Addr prec_addr = (trigger_blk - (i+1)) << log_blk_size;
+ addresses.push_back(AddrPriority(prec_addr, 0));
+ }
+ }
+ for (int i = 0; i < succ.size(); i++) {
+ // Address from the succeding blocks to issue a prefetch
+ if (succ[i]) {
+ const Addr succ_addr = (trigger_blk + (i+1)) << log_blk_size;
+ addresses.push_back(AddrPriority(succ_addr, 0));
+ }
+ }
+}
+
// Feed one retired-instruction PC into the spatial/temporal compaction
// pipeline (Ferdman et al., "Proactive Instruction Fetch", MICRO 2011).
// Accesses near the current trigger update the spatial compactor; region
// changes rotate entries through the temporal compactor, and entries that
// miss both are committed to the history buffer and indexed by trigger PC.
void
PIFPrefetcher::notifyRetiredInst(const Addr pc)
{
    // First access to the prefetcher
    if (temporalCompactor.size() == 0) {
        spatialCompactor = CompactorEntry(pc, precSize, succSize);
    } else {
        // If the PC of the instruction retired is in the same spatial region
        // than the last trigger address, update the bit vectors based on the
        // distance between them
        if (spatialCompactor.inSameSpatialRegion(pc, lBlkSize, true)) {
        // If the PC of the instruction retired is outside the latest spatial
        // region, check if it matches in any of the regions in the temporal
        // compactor and update it to the MRU position
        } else {
            bool is_in_temporal_compactor = false;

            // Check if the PC is in the temporal compactor
            for (auto it = temporalCompactor.begin();
                    it != temporalCompactor.end(); it++)
            {
                if (it->inSameSpatialRegion(pc, lBlkSize, false)) {
                    spatialCompactor = (*it);
                    temporalCompactor.erase(it);
                    is_in_temporal_compactor = true;
                    break;
                }
            }

            if (temporalCompactor.size() == maxCompactorEntries) {
                temporalCompactor.pop_front(); // Discard the LRU entry
            }

            // NOTE(review): on a temporal hit, spatialCompactor has already
            // been overwritten with the matching entry above, so this
            // re-inserts that entry at the MRU position and the previous
            // spatial compactor is dropped — confirm this is the intended
            // MRU update rather than losing the old spatial footprint.
            temporalCompactor.push_back(spatialCompactor);

            // If the compactor entry is neither the spatial or can't be
            // found in the temporal compactor, reset the spatial compactor
            // updating the trigger address and resetting the vector bits
            if (!is_in_temporal_compactor) {
                // Insert the spatial entry into the history buffer and update
                // the 'index' table to point to the new entry
                historyBuffer[historyBufferTail] = spatialCompactor;

                // Look up (or allocate) the index entry keyed by the
                // trigger PC so future fetches can find this history slot
                IndexEntry *idx_entry =
                    index.findEntry(spatialCompactor.trigger, false);
                if (idx_entry != nullptr) {
                    index.accessEntry(idx_entry);
                } else {
                    idx_entry = index.findVictim(spatialCompactor.trigger);
                    assert(idx_entry != nullptr);
                    index.insertEntry(spatialCompactor.trigger, false,
                                      idx_entry);
                }
                idx_entry->historyIndex = historyBufferTail;

                // Advance the circular history-buffer tail, wrapping at
                // the end
                historyBufferTail++;
                if (historyBufferTail == historyBuffer.size()) {
                    historyBufferTail = 0;
                }

                // Reset the spatial compactor fields with the new address
                spatialCompactor = CompactorEntry(pc, precSize, succSize);
            }
        }
    }
}
+
// Generate prefetch candidates for a demand access: either advance an
// already-active prediction stream (Stream Address Buffer hit) or start a
// new one from the 'index' table and history buffer.
void
PIFPrefetcher::calculatePrefetch(const PrefetchInfo &pfi,
                                 std::vector<AddrPriority> &addresses)
{
    const Addr addr = pfi.getAddr();

    // First check if the access has been prefetched, this is done by
    // comparing the access against the active Stream Address Buffers
    // (sabEntry is a reference to the stored pointer, so advancing it
    // below moves the stream's position in place)
    for (auto &sabEntry : streamAddressBuffer) {
        if (sabEntry->hasAddress(addr, lBlkSize)) {
            // Advance to the next entry (first check if we have reached the
            // end of the history buffer)
            // NOTE(review): 'sabEntry++' relies on consecutive history
            // entries being adjacent in the historyBuffer vector — true
            // for this circular buffer, but worth confirming if the
            // storage ever changes.
            if (sabEntry == &(historyBuffer[historyBuffer.size() - 1])) {
                sabEntry = &(historyBuffer[0]);
            } else {
                sabEntry++;
            }
            sabEntry->getPredictedAddresses(lBlkSize, addresses);
            // We are done
            return;
        }
    }

    // Check if a valid entry in the 'index' table is found and allocate a new
    // active prediction stream
    IndexEntry *idx_entry = index.findEntry(addr, /* unused */ false);

    if (idx_entry != nullptr) {
        index.accessEntry(idx_entry);
        // Trigger address from the 'index' table and index to the history
        // buffer
        const unsigned int hb_entry = idx_entry->historyIndex;
        CompactorEntry *entry = &historyBuffer[hb_entry];

        // Track the block in the Stream Address Buffer, evicting the
        // oldest stream when the buffer is full
        if (streamAddressBuffer.size() == maxStreamAddressBufferEntries) {
            streamAddressBuffer.pop_front();
        }
        streamAddressBuffer.push_back(entry);

        entry->getPredictedAddresses(lBlkSize, addresses);
    }
}
+
// Probe callback: forward each retired-instruction PC to the owning
// prefetcher's training path.
void
PIFPrefetcher::PrefetchListenerPC::notify(const Addr& pc)
{
    parent.notifyRetiredInst(pc);
}
+
+void
+PIFPrefetcher::addEventProbeRetiredInsts(SimObject *obj, const char *name)
+{
+ ProbeManager *pm(obj->getProbeManager());
+ listenersPC.push_back(new PrefetchListenerPC(*this, pm, name));
+}
+
// Param-class factory hook used by gem5's Python configuration layer to
// instantiate the prefetcher from its parameter object.
PIFPrefetcher*
PIFPrefetcherParams::create()
{
    return new PIFPrefetcher(this);
}