Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/cache/base_cache.hh                   4
-rw-r--r--  src/mem/cache/cache.cc                       22
-rw-r--r--  src/mem/cache/cache.hh                      191
-rw-r--r--  src/mem/cache/cache_builder.cc              151
-rw-r--r--  src/mem/cache/cache_impl.hh                 482
-rw-r--r--  src/mem/cache/prefetch/base_prefetcher.cc    26
-rw-r--r--  src/mem/cache/prefetch/base_prefetcher.hh    13
-rw-r--r--  src/mem/cache/prefetch/ghb_prefetcher.cc     42
-rw-r--r--  src/mem/cache/prefetch/ghb_prefetcher.hh     59
-rw-r--r--  src/mem/cache/prefetch/stride_prefetcher.cc  58
-rw-r--r--  src/mem/cache/prefetch/stride_prefetcher.hh  77
-rw-r--r--  src/mem/cache/prefetch/tagged_prefetcher.cc (renamed from src/mem/cache/prefetch/tagged_prefetcher_impl.hh)  12
-rw-r--r--  src/mem/cache/prefetch/tagged_prefetcher.hh  17
-rw-r--r--  src/mem/cache/tags/fa_lru.cc                 43
-rw-r--r--  src/mem/cache/tags/fa_lru.hh                 17
-rw-r--r--  src/mem/cache/tags/iic.cc                    62
-rw-r--r--  src/mem/cache/tags/iic.hh                    16
-rw-r--r--  src/mem/cache/tags/lru.cc                    24
-rw-r--r--  src/mem/cache/tags/lru.hh                    16
-rw-r--r--  src/mem/cache/tags/split.cc                  58
-rw-r--r--  src/mem/cache/tags/split.hh                  16
-rw-r--r--  src/mem/cache/tags/split_lifo.cc             28
-rw-r--r--  src/mem/cache/tags/split_lifo.hh             16
-rw-r--r--  src/mem/cache/tags/split_lru.cc              24
-rw-r--r--  src/mem/cache/tags/split_lru.hh              16
-rw-r--r--  src/mem/physical.cc                          51
26 files changed, 925 insertions, 616 deletions
diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
index a7f035dce..c10d98e8e 100644
--- a/src/mem/cache/base_cache.hh
+++ b/src/mem/cache/base_cache.hh
@@ -692,6 +692,10 @@ class BaseCache : public MemObject
}
return true;
}
+
+ virtual bool inCache(Addr addr) = 0;
+
+ virtual bool inMissQueue(Addr addr) = 0;
};
#endif //__BASE_CACHE_HH__
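
The two pure-virtual hooks added here let a prefetcher ask its owning cache whether an address is already resident or already being fetched, without knowing the concrete TagStore type. A minimal sketch of that pattern, using simplified stand-in types rather than the actual gem5 classes:

    #include <cstdint>

    using Addr = uint64_t;

    // Simplified stand-in for the interface added above; only the shape
    // of the new hooks, not the real BaseCache.
    struct CacheIface {
        virtual ~CacheIface() {}
        virtual bool inCache(Addr addr) = 0;
        virtual bool inMissQueue(Addr addr) = 0;
    };

    // A prefetcher can now filter candidates through the abstract
    // interface instead of being templated on the tag store.
    struct PrefetchFilter {
        CacheIface *cache;
        bool worthPrefetching(Addr addr) const {
            return !cache->inCache(addr) && !cache->inMissQueue(addr);
        }
    };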
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 29ca09060..cb4e7f62e 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -38,8 +38,6 @@
#include "mem/config/cache.hh"
-#include "mem/cache/tags/cache_tags.hh"
-
#if defined(USE_CACHE_LRU)
#include "mem/cache/tags/lru.hh"
#endif
@@ -73,28 +71,28 @@
#if defined(USE_CACHE_FALRU)
-template class Cache<CacheTags<FALRU>, SimpleCoherence>;
-template class Cache<CacheTags<FALRU>, UniCoherence>;
+template class Cache<FALRU, SimpleCoherence>;
+template class Cache<FALRU, UniCoherence>;
#endif
#if defined(USE_CACHE_IIC)
-template class Cache<CacheTags<IIC>, SimpleCoherence>;
-template class Cache<CacheTags<IIC>, UniCoherence>;
+template class Cache<IIC, SimpleCoherence>;
+template class Cache<IIC, UniCoherence>;
#endif
#if defined(USE_CACHE_LRU)
-template class Cache<CacheTags<LRU>, SimpleCoherence>;
-template class Cache<CacheTags<LRU>, UniCoherence>;
+template class Cache<LRU, SimpleCoherence>;
+template class Cache<LRU, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT)
-template class Cache<CacheTags<Split>, SimpleCoherence>;
-template class Cache<CacheTags<Split>, UniCoherence>;
+template class Cache<Split, SimpleCoherence>;
+template class Cache<Split, UniCoherence>;
#endif
#if defined(USE_CACHE_SPLIT_LIFO)
-template class Cache<CacheTags<SplitLIFO>, SimpleCoherence>;
-template class Cache<CacheTags<SplitLIFO>, UniCoherence>;
+template class Cache<SplitLIFO, SimpleCoherence>;
+template class Cache<SplitLIFO, UniCoherence>;
#endif
#endif //DOXYGEN_SHOULD_SKIP_THIS
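
With the CacheTags wrapper gone, the tag store is the first template argument directly. A standalone sketch of the explicit-instantiation pattern used above, with toy types, assuming each supported policy combination is instantiated exactly once in the .cc file:

    template <class TagStore, class Coherence>
    struct CacheSketch {
        TagStore tags;          // tag/data storage policy
        Coherence coherence;    // coherence policy
    };

    struct ToyLRU {};
    struct ToySimpleCoherence {};

    // One explicit instantiation per combination, mirroring the
    // "template class Cache<LRU, SimpleCoherence>;" lines above.
    template struct CacheSketch<ToyLRU, ToySimpleCoherence>;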
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 097b0f513..26dab2179 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -38,16 +38,17 @@
#ifndef __CACHE_HH__
#define __CACHE_HH__
+#include "base/compression/base.hh"
#include "base/misc.hh" // fatal, panic, and warn
#include "cpu/smt.hh" // SMT_MAX_THREADS
#include "mem/cache/base_cache.hh"
+#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/miss_buffer.hh"
-#include "mem/cache/prefetch/prefetcher.hh"
//Forward declaration
class MSHR;
-
+class BasePrefetcher;
/**
* A template-policy based cache. The behavior of the cache can be altered by
@@ -62,6 +63,8 @@ class Cache : public BaseCache
public:
/** Define the type of cache block to use. */
typedef typename TagStore::BlkType BlkType;
+ /** A typedef for a list of BlkType pointers. */
+ typedef typename TagStore::BlkList BlkList;
bool prefetchAccess;
@@ -115,7 +118,7 @@ class Cache : public BaseCache
Coherence *coherence;
/** Prefetcher */
- Prefetcher<TagStore> *prefetcher;
+ BasePrefetcher *prefetcher;
/**
* The clock ratio of the outgoing bus.
@@ -141,6 +144,141 @@ class Cache : public BaseCache
PacketPtr invalidatePkt;
Request *invalidateReq;
+ /**
+ * Policy class for performing compression.
+ */
+ CompressionAlgorithm *compressionAlg;
+
+ /**
+ * The block size of this cache. Set to value in the Tags object.
+ */
+ const int16_t blkSize;
+
+ /**
+ * Whether this cache should allocate a block on a line-sized write miss.
+ */
+ const bool doFastWrites;
+
+ const bool prefetchMiss;
+
+ /**
+ * Whether the data can be stored in a compressed form.
+ */
+ const bool storeCompressed;
+
+ /**
+ * Do we need to compress blocks on writebacks (i.e. because
+ * writeback bus is compressed but storage is not)?
+ */
+ const bool compressOnWriteback;
+
+ /**
+ * The latency of a compression operation.
+ */
+ const int16_t compLatency;
+
+ /**
+ * Whether to use an adaptive compression scheme.
+ */
+ const bool adaptiveCompression;
+
+ /**
+ * Do writebacks need to be compressed (i.e. because writeback bus
+ * is compressed), whether or not they're already compressed for
+ * storage.
+ */
+ const bool writebackCompressed;
+
+ /**
+ * Compare the internal block data to the fast access block data.
+ * @param blk The cache block to check.
+ * @return True if the data is the same.
+ */
+ bool verifyData(BlkType *blk);
+
+ /**
+ * Update the internal data of the block. The data to write is assumed to
+ * be in the fast access data.
+ * @param blk The block with the data to update.
+ * @param writebacks A list to store any generated writebacks.
+ * @param compress_block True if we should compress this block
+ */
+ void updateData(BlkType *blk, PacketList &writebacks, bool compress_block);
+
+ /**
+ * Handle a replacement for the given request.
+ * @param blk A pointer to the block, usually NULL
+ * @param pkt The memory request to satisfy.
+ * @param new_state The new state of the block.
+ * @param writebacks A list to store any generated writebacks.
+ */
+ BlkType* doReplacement(BlkType *blk, PacketPtr &pkt,
+ CacheBlk::State new_state, PacketList &writebacks);
+
+ /**
+ * Does all the processing necessary to perform the provided request.
+ * @param pkt The memory request to perform.
+ * @param lat The latency of the access.
+ * @param writebacks List for any writebacks that need to be performed.
+ * @param update True if the replacement data should be updated.
+ * @return Pointer to the cache block touched by the request. NULL if it
+ * was a miss.
+ */
+ BlkType* handleAccess(PacketPtr &pkt, int & lat,
+ PacketList & writebacks, bool update = true);
+
+ /**
+ * Populates a cache block and handles all outstanding requests for the
+ * satisfied fill request. This version takes an MSHR pointer and uses its
+ * request to fill the cache block, while responding to its targets.
+ * @param blk The cache block if it already exists.
+ * @param mshr The MSHR that contains the fill data and targets to satisfy.
+ * @param new_state The state of the new cache block.
+ * @param writebacks List for any writebacks that need to be performed.
+ * @return Pointer to the new cache block.
+ */
+ BlkType* handleFill(BlkType *blk, MSHR * mshr, CacheBlk::State new_state,
+ PacketList & writebacks, PacketPtr pkt);
+
+ /**
+ * Populates a cache block and handles all outstanding requests for the
+ * satisfied fill request. This version takes two memory requests. One
+ * contains the fill data, the other is an optional target to satisfy.
+ * Used for Cache::probe.
+ * @param blk The cache block if it already exists.
+ * @param pkt The memory request with the fill data.
+ * @param new_state The state of the new cache block.
+ * @param writebacks List for any writebacks that need to be performed.
+ * @param target The memory request to perform after the fill.
+ * @return Pointer to the new cache block.
+ */
+ BlkType* handleFill(BlkType *blk, PacketPtr &pkt,
+ CacheBlk::State new_state,
+ PacketList & writebacks, PacketPtr target = NULL);
+
+ /**
+ * Sets the blk to the new state and handles the given request.
+ * @param blk The cache block being snooped.
+ * @param new_state The new coherence state for the block.
+ * @param pkt The request to satisfy
+ */
+ void handleSnoop(BlkType *blk, CacheBlk::State new_state,
+ PacketPtr &pkt);
+
+ /**
+ * Sets the blk to the new state.
+ * @param blk The cache block being snooped.
+ * @param new_state The new coherence state for the block.
+ */
+ void handleSnoop(BlkType *blk, CacheBlk::State new_state);
+
+ /**
+ * Create a writeback request for the given block.
+ * @param blk The block to writeback.
+ * @return The writeback request for the block.
+ */
+ PacketPtr writebackBlk(BlkType *blk);
+
public:
class Params
@@ -150,18 +288,41 @@ class Cache : public BaseCache
MissBuffer *missQueue;
Coherence *coherence;
BaseCache::Params baseParams;
- Prefetcher<TagStore> *prefetcher;
+ BasePrefetcher *prefetcher;
bool prefetchAccess;
int hitLatency;
+ CompressionAlgorithm *compressionAlg;
+ const int16_t blkSize;
+ const bool doFastWrites;
+ const bool prefetchMiss;
+ const bool storeCompressed;
+ const bool compressOnWriteback;
+ const int16_t compLatency;
+ const bool adaptiveCompression;
+ const bool writebackCompressed;
Params(TagStore *_tags, MissBuffer *mq, Coherence *coh,
BaseCache::Params params,
- Prefetcher<TagStore> *_prefetcher,
- bool prefetch_access, int hit_latency)
+ BasePrefetcher *_prefetcher,
+ bool prefetch_access, int hit_latency,
+ bool do_fast_writes,
+ bool store_compressed, bool adaptive_compression,
+ bool writeback_compressed,
+ CompressionAlgorithm *_compressionAlg, int comp_latency,
+ bool prefetch_miss)
: tags(_tags), missQueue(mq), coherence(coh),
baseParams(params),
prefetcher(_prefetcher), prefetchAccess(prefetch_access),
- hitLatency(hit_latency)
+ hitLatency(hit_latency),
+ compressionAlg(_compressionAlg),
+ blkSize(_tags->getBlockSize()),
+ doFastWrites(do_fast_writes),
+ prefetchMiss(prefetch_miss),
+ storeCompressed(store_compressed),
+ compressOnWriteback(!store_compressed && writeback_compressed),
+ compLatency(comp_latency),
+ adaptiveCompression(adaptive_compression),
+ writebackCompressed(writeback_compressed)
{
}
};
@@ -223,14 +384,6 @@ class Cache : public BaseCache
void snoopResponse(PacketPtr &pkt);
/**
- * Invalidates the block containing address if found.
- * @param addr The address to look for.
- * @param asid The address space ID of the address.
- * @todo Is this function necessary?
- */
- void invalidateBlk(Addr addr);
-
- /**
* Squash all requests associated with specified thread.
* intended for use by I-cache.
* @param threadNum The thread to squash.
@@ -273,6 +426,14 @@ class Cache : public BaseCache
* @return The estimated completion time.
*/
Tick snoopProbe(PacketPtr &pkt);
+
+ bool inCache(Addr addr) {
+ return (tags->findBlock(addr) != 0);
+ }
+
+ bool inMissQueue(Addr addr) {
+ return (missQueue->findMSHR(addr) != 0);
+ }
};
#endif // __CACHE_HH__
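
The Params initializer above derives compressOnWriteback from the other two flags: an extra compression pass is only needed when the writeback bus is compressed but the storage is not. A small self-contained sketch of that derivation (illustrative names only, not the gem5 types):

    #include <cassert>

    struct CompressionConfig {
        bool storeCompressed;      // blocks kept compressed in the data array
        bool writebackCompressed;  // writeback bus carries compressed data
        bool compressOnWriteback;  // derived: compress only at writeback time
    };

    CompressionConfig makeConfig(bool store_compressed, bool writeback_compressed)
    {
        return { store_compressed, writeback_compressed,
                 !store_compressed && writeback_compressed };
    }

    int main()
    {
        // Already stored compressed: nothing extra to do at writeback.
        assert(!makeConfig(true, true).compressOnWriteback);
        // Uncompressed storage, compressed bus: compress at writeback.
        assert(makeConfig(false, true).compressOnWriteback);
        return 0;
    }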
diff --git a/src/mem/cache/cache_builder.cc b/src/mem/cache/cache_builder.cc
index 4b1b19718..318b57d50 100644
--- a/src/mem/cache/cache_builder.cc
+++ b/src/mem/cache/cache_builder.cc
@@ -70,9 +70,6 @@
#include "base/compression/null_compression.hh"
#include "base/compression/lzss_compression.hh"
-// CacheTags Templates
-#include "mem/cache/tags/cache_tags.hh"
-
// MissQueue Templates
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/miss/blocking_buffer.hh"
@@ -108,8 +105,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> tgts_per_mshr;
Param<int> write_buffers;
Param<bool> prioritizeRequests;
-// SimObjectParam<Bus *> in_bus;
-// SimObjectParam<Bus *> out_bus;
SimObjectParam<CoherenceProtocol *> protocol;
Param<Addr> trace_addr;
Param<int> hash_delay;
@@ -122,7 +117,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
Param<int> compression_latency;
Param<int> subblock_size;
Param<Counter> max_miss_count;
-// SimObjectParam<HierParams *> hier;
VectorParam<Range<Addr> > addr_range;
// SimObjectParam<MemTraceWriter *> mem_trace;
Param<bool> split;
@@ -156,9 +150,6 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8),
INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first",
false),
-/* INIT_PARAM_DFLT(in_bus, "incoming bus object", NULL),
- INIT_PARAM(out_bus, "outgoing bus object"),
-*/
INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL),
INIT_PARAM_DFLT(trace_addr, "address to trace", 0),
@@ -182,10 +173,6 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
INIT_PARAM_DFLT(max_miss_count,
"The number of misses to handle before calling exit",
0),
-/* INIT_PARAM_DFLT(hier,
- "Hierarchy global variables",
- &defaultHierParams),
-*/
INIT_PARAM_DFLT(addr_range, "The address range in bytes",
vector<Range<Addr> >(1,RangeIn((Addr)0, MaxAddr))),
// INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL),
@@ -208,47 +195,47 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
END_INIT_SIM_OBJECT_PARAMS(BaseCache)
-#define BUILD_CACHE(t, c) do { \
- Prefetcher<CacheTags<t> > *pf; \
- if (pf_policy == "tagged") { \
- BUILD_TAGGED_PREFETCHER(t); \
- } \
- else if (pf_policy == "stride") { \
- BUILD_STRIDED_PREFETCHER(t); \
- } \
- else if (pf_policy == "ghb") { \
- BUILD_GHB_PREFETCHER(t); \
- } \
- else { \
- BUILD_NULL_PREFETCHER(t); \
- } \
- Cache<CacheTags<t>, c>::Params params(tagStore, mq, coh, \
- base_params, \
- pf, \
- prefetch_access, hit_latency); \
- Cache<CacheTags<t>, c> *retval = \
- new Cache<CacheTags<t>, c>(getInstanceName(), params); \
-return retval; \
+#define BUILD_CACHE(TAGS, tags, c) \
+ do { \
+ BasePrefetcher *pf; \
+ if (pf_policy == "tagged") { \
+ BUILD_TAGGED_PREFETCHER(TAGS); \
+ } \
+ else if (pf_policy == "stride") { \
+ BUILD_STRIDED_PREFETCHER(TAGS); \
+ } \
+ else if (pf_policy == "ghb") { \
+ BUILD_GHB_PREFETCHER(TAGS); \
+ } \
+ else { \
+ BUILD_NULL_PREFETCHER(TAGS); \
+ } \
+ Cache<TAGS, c>::Params params(tags, mq, coh, base_params, \
+ pf, prefetch_access, hit_latency, \
+ true, \
+ store_compressed, \
+ adaptive_compression, \
+ compressed_bus, \
+ compAlg, compression_latency, \
+ prefetch_miss); \
+ Cache<TAGS, c> *retval = \
+ new Cache<TAGS, c>(getInstanceName(), params); \
+ return retval; \
} while (0)
#define BUILD_CACHE_PANIC(x) do { \
panic("%s not compiled into M5", x); \
} while (0)
-#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \
- do { \
- CompressionAlgorithm *compAlg; \
- if (compressed_bus || store_compressed) { \
- compAlg = new LZSSCompression(); \
- } else { \
- compAlg = new NullCompression(); \
- } \
- CacheTags<TAGS> *tagStore = \
- new CacheTags<TAGS>(tags, compression_latency, true, \
- store_compressed, adaptive_compression, \
- compressed_bus, \
- compAlg, prefetch_miss); \
- BUILD_CACHE(TAGS, c); \
+#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \
+ do { \
+ CompressionAlgorithm *compAlg; \
+ if (compressed_bus || store_compressed) { \
+ compAlg = new LZSSCompression(); \
+ } else { \
+ compAlg = new NullCompression(); \
+ } \
+ BUILD_CACHE(TAGS, tags, c); \
} while (0)
#if defined(USE_CACHE_FALRU)
@@ -327,55 +314,55 @@ return retval; \
} while (0)
#if defined(USE_TAGGED)
-#define BUILD_TAGGED_PREFETCHER(t) pf = new \
- TaggedPrefetcher<CacheTags<t> >(prefetcher_size, \
- !prefetch_past_page, \
- prefetch_serial_squash, \
- prefetch_cache_check_push, \
- prefetch_data_accesses_only, \
- prefetch_latency, \
- prefetch_degree)
+#define BUILD_TAGGED_PREFETCHER(t) \
+ pf = new TaggedPrefetcher(prefetcher_size, \
+ !prefetch_past_page, \
+ prefetch_serial_squash, \
+ prefetch_cache_check_push, \
+ prefetch_data_accesses_only, \
+ prefetch_latency, \
+ prefetch_degree)
#else
#define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher")
#endif
#if defined(USE_STRIDED)
-#define BUILD_STRIDED_PREFETCHER(t) pf = new \
- StridePrefetcher<CacheTags<t> >(prefetcher_size, \
- !prefetch_past_page, \
- prefetch_serial_squash, \
- prefetch_cache_check_push, \
- prefetch_data_accesses_only, \
- prefetch_latency, \
- prefetch_degree, \
- prefetch_use_cpu_id)
+#define BUILD_STRIDED_PREFETCHER(t) \
+ pf = new StridePrefetcher(prefetcher_size, \
+ !prefetch_past_page, \
+ prefetch_serial_squash, \
+ prefetch_cache_check_push, \
+ prefetch_data_accesses_only, \
+ prefetch_latency, \
+ prefetch_degree, \
+ prefetch_use_cpu_id)
#else
#define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher")
#endif
#if defined(USE_GHB)
-#define BUILD_GHB_PREFETCHER(t) pf = new \
- GHBPrefetcher<CacheTags<t> >(prefetcher_size, \
- !prefetch_past_page, \
- prefetch_serial_squash, \
- prefetch_cache_check_push, \
- prefetch_data_accesses_only, \
- prefetch_latency, \
- prefetch_degree, \
- prefetch_use_cpu_id)
+#define BUILD_GHB_PREFETCHER(t) \
+ pf = new GHBPrefetcher(prefetcher_size, \
+ !prefetch_past_page, \
+ prefetch_serial_squash, \
+ prefetch_cache_check_push, \
+ prefetch_data_accesses_only, \
+ prefetch_latency, \
+ prefetch_degree, \
+ prefetch_use_cpu_id)
#else
#define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher")
#endif
#if defined(USE_TAGGED)
-#define BUILD_NULL_PREFETCHER(t) pf = new \
- TaggedPrefetcher<CacheTags<t> >(prefetcher_size, \
- !prefetch_past_page, \
- prefetch_serial_squash, \
- prefetch_cache_check_push, \
- prefetch_data_accesses_only, \
- prefetch_latency, \
- prefetch_degree)
+#define BUILD_NULL_PREFETCHER(t) \
+ pf = new TaggedPrefetcher(prefetcher_size, \
+ !prefetch_past_page, \
+ prefetch_serial_squash, \
+ prefetch_cache_check_push, \
+ prefetch_data_accesses_only, \
+ prefetch_latency, \
+ prefetch_degree)
#else
#define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
#endif
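
The BUILD_CACHE macro above dispatches on pf_policy to construct a prefetcher before building the cache. A rough non-macro equivalent of that selection, with toy types standing in for the real prefetcher classes:

    #include <string>

    struct BasePF { virtual ~BasePF() {} };
    struct TaggedPF : BasePF {};
    struct StridePF : BasePF {};
    struct GHBPF    : BasePF {};

    BasePF *buildPrefetcher(const std::string &pf_policy)
    {
        if (pf_policy == "tagged") return new TaggedPF;
        if (pf_policy == "stride") return new StridePF;
        if (pf_policy == "ghb")    return new GHBPF;
        // Any other policy falls back to the tagged prefetcher, matching
        // BUILD_NULL_PREFETCHER above.
        return new TaggedPF;
    }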
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 9f48ff1f3..9c41983fc 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -49,7 +49,7 @@
#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
-#include "mem/cache/prefetch/prefetcher.hh"
+#include "mem/cache/prefetch/base_prefetcher.hh"
#include "sim/sim_exit.hh" // for SimExitEvent
@@ -72,16 +72,22 @@ Cache(const std::string &_name,
prefetchAccess(params.prefetchAccess),
tags(params.tags), missQueue(params.missQueue),
coherence(params.coherence), prefetcher(params.prefetcher),
- hitLatency(params.hitLatency)
+ hitLatency(params.hitLatency),
+ compressionAlg(params.compressionAlg),
+ blkSize(params.blkSize),
+ doFastWrites(params.doFastWrites),
+ prefetchMiss(params.prefetchMiss),
+ storeCompressed(params.storeCompressed),
+ compressOnWriteback(params.compressOnWriteback),
+ compLatency(params.compLatency),
+ adaptiveCompression(params.adaptiveCompression),
+ writebackCompressed(params.writebackCompressed)
{
tags->setCache(this);
- tags->setPrefetcher(prefetcher);
missQueue->setCache(this);
missQueue->setPrefetcher(prefetcher);
coherence->setCache(this);
prefetcher->setCache(this);
- prefetcher->setTags(tags);
- prefetcher->setBuffer(missQueue);
invalidateReq = new Request((Addr) NULL, blkSize, 0);
invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
}
@@ -98,6 +104,433 @@ Cache<TagStore,Coherence>::regStats()
}
template<class TagStore, class Coherence>
+typename Cache<TagStore,Coherence>::BlkType*
+Cache<TagStore,Coherence>::handleAccess(PacketPtr &pkt, int & lat,
+ PacketList & writebacks, bool update)
+{
+ // Set the block offset here
+ int offset = tags->extractBlkOffset(pkt->getAddr());
+
+ BlkType *blk = NULL;
+ if (update) {
+ blk = tags->findBlock(pkt->getAddr(), lat);
+ } else {
+ blk = tags->findBlock(pkt->getAddr());
+ lat = 0;
+ }
+ if (blk != NULL) {
+
+ if (!update) {
+ if (pkt->isWrite()){
+ assert(offset < blkSize);
+ assert(pkt->getSize() <= blkSize);
+ assert(offset+pkt->getSize() <= blkSize);
+ memcpy(blk->data + offset, pkt->getPtr<uint8_t>(),
+ pkt->getSize());
+ } else if (!(pkt->flags & SATISFIED)) {
+ pkt->flags |= SATISFIED;
+ pkt->result = Packet::Success;
+ assert(offset < blkSize);
+ assert(pkt->getSize() <= blkSize);
+ assert(offset + pkt->getSize() <= blkSize);
+ memcpy(pkt->getPtr<uint8_t>(), blk->data + offset,
+ pkt->getSize());
+ }
+ return blk;
+ }
+
+ // Hit
+ if (blk->isPrefetch()) {
+ //Signal that this was a hit under prefetch: we can only get here
+ //if the block was prefetched and has not been referenced yet
+ DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
+ blk->status &= ~BlkHWPrefetched;
+ if (prefetchMiss) {
+ //If we are using the miss stream, signal the
+ //prefetcher; otherwise the access stream would have
+ //already signaled this hit
+ prefetcher->handleMiss(pkt, curTick);
+ }
+ }
+
+ if ((pkt->isWrite() && blk->isWritable()) ||
+ (pkt->isRead() && blk->isValid())) {
+
+ // We are satisfying the request
+ pkt->flags |= SATISFIED;
+
+ if (blk->isCompressed()) {
+ // If the data is compressed, need to increase the latency
+ lat += (compLatency/4);
+ }
+
+ bool write_data = false;
+
+ assert(verifyData(blk));
+
+ assert(offset < blkSize);
+ assert(pkt->getSize() <= blkSize);
+ assert(offset+pkt->getSize() <= blkSize);
+
+ if (pkt->isWrite()) {
+ if (blk->checkWrite(pkt->req)) {
+ write_data = true;
+ blk->status |= BlkDirty;
+ memcpy(blk->data + offset, pkt->getPtr<uint8_t>(),
+ pkt->getSize());
+ }
+ } else {
+ assert(pkt->isRead());
+ if (pkt->req->isLocked()) {
+ blk->trackLoadLocked(pkt->req);
+ }
+ memcpy(pkt->getPtr<uint8_t>(), blk->data + offset,
+ pkt->getSize());
+ }
+
+ if (write_data ||
+ (adaptiveCompression && blk->isCompressed()))
+ {
+ // If we wrote data, need to update the internal block
+ // data.
+ updateData(blk, writebacks,
+ !(adaptiveCompression &&
+ blk->isReferenced()));
+ }
+ } else {
+ // permission violation, treat it as a miss
+ blk = NULL;
+ }
+ } else {
+ // complete miss (no matching block)
+ if (pkt->req->isLocked() && pkt->isWrite()) {
+ // miss on store conditional... just give up now
+ pkt->req->setScResult(0);
+ pkt->flags |= SATISFIED;
+ }
+ }
+
+ return blk;
+}
+
+template<class TagStore, class Coherence>
+typename Cache<TagStore,Coherence>::BlkType*
+Cache<TagStore,Coherence>::handleFill(BlkType *blk, PacketPtr &pkt,
+ CacheBlk::State new_state,
+ PacketList & writebacks,
+ PacketPtr target)
+{
+#ifndef NDEBUG
+ BlkType *tmp_blk = tags->findBlock(pkt->getAddr());
+ assert(tmp_blk == blk);
+#endif
+ blk = doReplacement(blk, pkt, new_state, writebacks);
+
+
+ if (pkt->isRead()) {
+ memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
+ }
+
+ blk->whenReady = pkt->finishTime;
+
+ // Respond to target, if any
+ if (target) {
+
+ target->flags |= SATISFIED;
+
+ if (target->cmd == Packet::InvalidateReq) {
+ tags->invalidateBlk(blk);
+ blk = NULL;
+ }
+
+ if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) {
+ assert(target->isWrite() || target->isRead());
+ assert(target->getOffset(blkSize) + target->getSize() <= blkSize);
+ if (target->isWrite()) {
+ if (blk->checkWrite(pkt->req)) {
+ blk->status |= BlkDirty;
+ memcpy(blk->data + target->getOffset(blkSize),
+ target->getPtr<uint8_t>(), target->getSize());
+ }
+ } else {
+ if (pkt->req->isLocked()) {
+ blk->trackLoadLocked(pkt->req);
+ }
+ memcpy(target->getPtr<uint8_t>(),
+ blk->data + target->getOffset(blkSize),
+ target->getSize());
+ }
+ }
+ }
+
+ if (blk) {
+ // Need to write the data into the block
+ updateData(blk, writebacks, !adaptiveCompression || true);
+ }
+ return blk;
+}
+
+template<class TagStore, class Coherence>
+typename Cache<TagStore,Coherence>::BlkType*
+Cache<TagStore,Coherence>::handleFill(BlkType *blk, MSHR * mshr,
+ CacheBlk::State new_state,
+ PacketList & writebacks, PacketPtr pkt)
+{
+/*
+#ifndef NDEBUG
+ BlkType *tmp_blk = findBlock(mshr->pkt->getAddr());
+ assert(tmp_blk == blk);
+#endif
+ PacketPtr pkt = mshr->pkt;*/
+ blk = doReplacement(blk, pkt, new_state, writebacks);
+
+ if (pkt->isRead()) {
+ memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
+ }
+
+ blk->whenReady = pkt->finishTime;
+
+
+ // respond to MSHR targets, if any
+
+ // First offset for critical word first calculations
+ int initial_offset = 0;
+
+ if (mshr->hasTargets()) {
+ initial_offset = mshr->getTarget()->getOffset(blkSize);
+ }
+
+ while (mshr->hasTargets()) {
+ PacketPtr target = mshr->getTarget();
+
+ target->flags |= SATISFIED;
+
+ // How many bytes past the first request this one is
+ int transfer_offset = target->getOffset(blkSize) - initial_offset;
+ if (transfer_offset < 0) {
+ transfer_offset += blkSize;
+ }
+
+ // If this is the critical word (no offset), return the first-word time
+ Tick completion_time = tags->getHitLatency() +
+ (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
+
+ if (target->cmd == Packet::InvalidateReq) {
+ //Mark the blk as invalid now, if it hasn't been already
+ if (blk) {
+ tags->invalidateBlk(blk);
+ blk = NULL;
+ }
+
+ //Also get rid of the invalidate
+ mshr->popTarget();
+
+ DPRINTF(Cache, "Popping off an Invalidate for addr %x\n",
+ pkt->getAddr());
+
+ continue;
+ }
+
+ if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) {
+ assert(target->isWrite() || target->isRead());
+ assert(target->getOffset(blkSize) + target->getSize() <= blkSize);
+ if (target->isWrite()) {
+ if (blk->checkWrite(pkt->req)) {
+ blk->status |= BlkDirty;
+ memcpy(blk->data + target->getOffset(blkSize),
+ target->getPtr<uint8_t>(), target->getSize());
+ }
+ } else {
+ if (pkt->req->isLocked()) {
+ blk->trackLoadLocked(pkt->req);
+ }
+ memcpy(target->getPtr<uint8_t>(),
+ blk->data + target->getOffset(blkSize),
+ target->getSize());
+ }
+ } else {
+ // Invalid access: need to issue another request.
+ // This can occur if the block was invalidated or we do not
+ // have the correct permissions.
+// mshr->pkt = pkt;
+ break;
+ }
+ respondToMiss(target, completion_time);
+ mshr->popTarget();
+ }
+
+ if (blk) {
+ // Need to write the data into the block
+ updateData(blk, writebacks, !adaptiveCompression || true);
+ }
+
+ return blk;
+}
+
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::handleSnoop(BlkType *blk,
+ CacheBlk::State new_state,
+ PacketPtr &pkt)
+{
+ //Must have the block to supply
+ assert(blk);
+ // Can only supply data, and only if it hasn't already been supplied
+ assert(pkt->isRead());
+ assert(!(pkt->flags & SATISFIED));
+ pkt->flags |= SATISFIED;
+ Addr offset = pkt->getOffset(blkSize);
+ assert(offset < blkSize);
+ assert(pkt->getSize() <= blkSize);
+ assert(offset + pkt->getSize() <= blkSize);
+ memcpy(pkt->getPtr<uint8_t>(), blk->data + offset, pkt->getSize());
+
+ handleSnoop(blk, new_state);
+}
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::handleSnoop(BlkType *blk,
+ CacheBlk::State new_state)
+{
+ if (blk && blk->status != new_state) {
+ if ((new_state & BlkValid) == 0) {
+ tags->invalidateBlk(blk);
+ } else {
+ assert(new_state >= 0 && new_state < 128);
+ blk->status = new_state;
+ }
+ }
+}
+
+template<class TagStore, class Coherence>
+PacketPtr
+Cache<TagStore,Coherence>::writebackBlk(BlkType *blk)
+{
+ assert(blk && blk->isValid() && blk->isModified());
+ int data_size = blkSize;
+ data_size = blk->size;
+ if (compressOnWriteback) {
+ // not already compressed
+ // need to compress to ship it
+ assert(data_size == blkSize);
+ uint8_t *tmp_data = new uint8_t[blkSize];
+ data_size = compressionAlg->compress(tmp_data,blk->data,
+ data_size);
+ delete [] tmp_data;
+ }
+
+/* PacketPtr writeback =
+ buildWritebackReq(tags->regenerateBlkAddr(blk->tag, blk->set),
+ blk->asid, blkSize,
+ blk->data, data_size);
+*/
+
+ Request *writebackReq =
+ new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
+ PacketPtr writeback = new Packet(writebackReq, Packet::Writeback, -1);
+ writeback->allocate();
+ memcpy(writeback->getPtr<uint8_t>(),blk->data,blkSize);
+
+ blk->status &= ~BlkDirty;
+ return writeback;
+}
+
+
+template<class TagStore, class Coherence>
+bool
+Cache<TagStore,Coherence>::verifyData(BlkType *blk)
+{
+ bool retval;
+ // The data stored in the blk
+ uint8_t *blk_data = new uint8_t[blkSize];
+ tags->readData(blk, blk_data);
+ // Pointer for uncompressed data, assumed uncompressed
+ uint8_t *tmp_data = blk_data;
+ // The size of the data being stored, assumed uncompressed
+ int data_size = blkSize;
+
+ // If the block is compressed need to uncompress to access
+ if (blk->isCompressed()){
+ // Allocate new storage for the data
+ tmp_data = new uint8_t[blkSize];
+ data_size = compressionAlg->uncompress(tmp_data,blk_data, blk->size);
+ assert(data_size == blkSize);
+ // Don't need to keep blk_data around
+ delete [] blk_data;
+ } else {
+ assert(blkSize == blk->size);
+ }
+
+ retval = memcmp(tmp_data, blk->data, blkSize) == 0;
+ delete [] tmp_data;
+ return retval;
+}
+
+template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::updateData(BlkType *blk, PacketList &writebacks,
+ bool compress_block)
+{
+ if (storeCompressed && compress_block) {
+ uint8_t *comp_data = new uint8_t[blkSize];
+ int new_size = compressionAlg->compress(comp_data, blk->data, blkSize);
+ if (new_size > (blkSize - tags->getSubBlockSize())){
+ // no benefit to storing it compressed
+ blk->status &= ~BlkCompressed;
+ tags->writeData(blk, blk->data, blkSize,
+ writebacks);
+ } else {
+ // Store the data compressed
+ blk->status |= BlkCompressed;
+ tags->writeData(blk, comp_data, new_size,
+ writebacks);
+ }
+ delete [] comp_data;
+ } else {
+ blk->status &= ~BlkCompressed;
+ tags->writeData(blk, blk->data, blkSize, writebacks);
+ }
+}
+
+template<class TagStore, class Coherence>
+typename Cache<TagStore,Coherence>::BlkType*
+Cache<TagStore,Coherence>::doReplacement(BlkType *blk, PacketPtr &pkt,
+ CacheBlk::State new_state,
+ PacketList &writebacks)
+{
+ if (blk == NULL) {
+ // need to do a replacement
+ BlkList compress_list;
+ blk = tags->findReplacement(pkt, writebacks, compress_list);
+ while (adaptiveCompression && !compress_list.empty()) {
+ updateData(compress_list.front(), writebacks, true);
+ compress_list.pop_front();
+ }
+ if (blk->isValid()) {
+ DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
+ tags->regenerateBlkAddr(blk->tag,blk->set), pkt->getAddr(),
+ (blk->isModified()) ? "writeback" : "clean");
+
+ if (blk->isModified()) {
+ // Need to write the data back
+ writebacks.push_back(writebackBlk(blk));
+ }
+ }
+ blk->tag = tags->extractTag(pkt->getAddr(), blk);
+ } else {
+ // must be a status change
+ // assert(blk->status != new_state);
+ if (blk->status == new_state) warn("Changing state to same value\n");
+ }
+
+ blk->status = new_state;
+ return blk;
+}
+
+
+template<class TagStore, class Coherence>
bool
Cache<TagStore,Coherence>::access(PacketPtr &pkt)
{
@@ -112,7 +545,7 @@ Cache<TagStore,Coherence>::access(PacketPtr &pkt)
prefetcher->handleMiss(pkt, curTick);
}
if (!pkt->req->isUncacheable()) {
- blk = tags->handleAccess(pkt, lat, writebacks);
+ blk = handleAccess(pkt, lat, writebacks);
} else {
size = pkt->getSize();
}
@@ -130,7 +563,7 @@ Cache<TagStore,Coherence>::access(PacketPtr &pkt)
warn("WriteInv doing a fastallocate "
"with an outstanding miss to the same address\n");
}
- blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
+ blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
writebacks);
++fastWrites;
}
@@ -195,7 +628,7 @@ Cache<TagStore,Coherence>::getPacket()
if (!pkt->req->isUncacheable()) {
if (pkt->cmd == Packet::HardPFReq)
misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
- BlkType *blk = tags->findBlock(pkt);
+ BlkType *blk = tags->findBlock(pkt->getAddr());
Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
(blk)? blk->status : 0);
missQueue->setBusCmd(pkt, cmd);
@@ -224,7 +657,7 @@ Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
if (upgrade) {
assert(pkt); //Upgrades need to be fixed
pkt->flags &= ~CACHE_LINE_FILL;
- BlkType *blk = tags->findBlock(pkt);
+ BlkType *blk = tags->findBlock(pkt->getAddr());
CacheBlk::State old_state = (blk) ? blk->status : 0;
CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
if (old_state != new_state)
@@ -233,7 +666,7 @@ Cache<TagStore,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
//Set the state on the upgrade
memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
PacketList writebacks;
- tags->handleFill(blk, mshr, new_state, writebacks, pkt);
+ handleFill(blk, mshr, new_state, writebacks, pkt);
assert(writebacks.empty());
missQueue->handleResponse(pkt, curTick + hitLatency);
}
@@ -275,7 +708,7 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
pkt->getAddr());
- blk = tags->findBlock(pkt);
+ blk = tags->findBlock(pkt->getAddr());
CacheBlk::State old_state = (blk) ? blk->status : 0;
PacketList writebacks;
CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
@@ -284,7 +717,7 @@ Cache<TagStore,Coherence>::handleResponse(PacketPtr &pkt)
"state %i to %i\n",
pkt->getAddr(),
old_state, new_state);
- blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
+ blk = handleFill(blk, (MSHR*)pkt->senderState,
new_state, writebacks, pkt);
while (!writebacks.empty()) {
missQueue->doWriteback(writebacks.front());
@@ -332,7 +765,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
}
Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
- BlkType *blk = tags->findBlock(pkt);
+ BlkType *blk = tags->findBlock(pkt->getAddr());
MSHR *mshr = missQueue->findMSHR(blk_addr);
if (coherence->hasProtocol() || pkt->isInvalidate()) {
//@todo Move this into handle bus req
@@ -435,7 +868,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
"now supplying data, new state is %i\n",
pkt->cmdString(), blk_addr, new_state);
- tags->handleSnoop(blk, new_state, pkt);
+ handleSnoop(blk, new_state, pkt);
respondToSnoop(pkt, curTick + hitLatency);
return;
}
@@ -443,7 +876,7 @@ Cache<TagStore,Coherence>::snoop(PacketPtr &pkt)
DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
"new state is %i\n", pkt->cmdString(), blk_addr, new_state);
- tags->handleSnoop(blk, new_state);
+ handleSnoop(blk, new_state);
}
template<class TagStore, class Coherence>
@@ -465,13 +898,6 @@ Cache<TagStore,Coherence>::snoopResponse(PacketPtr &pkt)
}
}
-template<class TagStore, class Coherence>
-void
-Cache<TagStore,Coherence>::invalidateBlk(Addr addr)
-{
- tags->invalidateBlk(addr);
-}
-
/**
* @todo Fix to not assume write allocate
@@ -501,7 +927,7 @@ Cache<TagStore,Coherence>::probe(PacketPtr &pkt, bool update,
PacketList writebacks;
int lat;
- BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);
+ BlkType *blk = handleAccess(pkt, lat, writebacks, update);
DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(),
pkt->getAddr(), (blk) ? "hit" : "miss");
@@ -557,7 +983,7 @@ Cache<TagStore,Coherence>::probe(PacketPtr &pkt, bool update,
if (!pkt->req->isUncacheable() /*Uncacheables just go through*/
&& (pkt->cmd != Packet::Writeback)/*Writebacks on miss fall through*/) {
// Fetch the cache block to fill
- BlkType *blk = tags->findBlock(pkt);
+ BlkType *blk = tags->findBlock(pkt->getAddr());
Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
(blk)? blk->status : 0);
@@ -593,7 +1019,7 @@ return 0;
DPRINTF(Cache, "Block for blk addr %x moving from state "
"%i to %i\n", busPkt->getAddr(), old_state, new_state);
- tags->handleFill(blk, busPkt, new_state, writebacks, pkt);
+ handleFill(blk, busPkt, new_state, writebacks, pkt);
//Free the packet
delete busPkt;
@@ -639,7 +1065,7 @@ Cache<TagStore,Coherence>::snoopProbe(PacketPtr &pkt)
}
Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
- BlkType *blk = tags->findBlock(pkt);
+ BlkType *blk = tags->findBlock(pkt->getAddr());
MSHR *mshr = missQueue->findMSHR(blk_addr);
CacheBlk::State new_state = 0;
bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
@@ -648,14 +1074,14 @@ Cache<TagStore,Coherence>::snoopProbe(PacketPtr &pkt)
"now supplying data, new state is %i\n",
pkt->cmdString(), blk_addr, new_state);
- tags->handleSnoop(blk, new_state, pkt);
+ handleSnoop(blk, new_state, pkt);
return hitLatency;
}
if (blk)
DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
"new state is %i\n",
pkt->cmdString(), blk_addr, new_state);
- tags->handleSnoop(blk, new_state);
+ handleSnoop(blk, new_state);
return 0;
}
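
handleFill() above serves MSHR targets in critical-word-first order: each target's offset is taken relative to the first target and wrapped around the block, and only a zero offset gets the first-word latency. A small standalone sketch of that offset arithmetic (not the gem5 code):

    #include <cassert>

    int transferOffset(int target_offset, int initial_offset, int blk_size)
    {
        int off = target_offset - initial_offset;
        if (off < 0)
            off += blk_size;    // wrap around the block boundary
        return off;
    }

    int main()
    {
        assert(transferOffset(16, 16, 64) == 0);   // the critical word itself
        assert(transferOffset(0, 16, 64) == 48);   // wraps past the block end
        return 0;
    }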
diff --git a/src/mem/cache/prefetch/base_prefetcher.cc b/src/mem/cache/prefetch/base_prefetcher.cc
index a1388fad6..4254800b1 100644
--- a/src/mem/cache/prefetch/base_prefetcher.cc
+++ b/src/mem/cache/prefetch/base_prefetcher.cc
@@ -102,6 +102,26 @@ BasePrefetcher::regStats(const std::string &name)
;
}
+inline bool
+BasePrefetcher::inCache(Addr addr)
+{
+ if (cache->inCache(addr)) {
+ pfCacheHit++;
+ return true;
+ }
+ return false;
+}
+
+inline bool
+BasePrefetcher::inMissQueue(Addr addr)
+{
+ if (cache->inMissQueue(addr)) {
+ pfMSHRHit++;
+ return true;
+ }
+ return false;
+}
+
PacketPtr
BasePrefetcher::getPacket()
{
@@ -118,7 +138,7 @@ BasePrefetcher::getPacket()
pkt = *pf.begin();
pf.pop_front();
if (!cacheCheckPush) {
- keepTrying = inCache(pkt);
+ keepTrying = cache->inCache(pkt->getAddr());
}
if (pf.empty()) {
cache->clearMasterRequest(Request_PF);
@@ -190,7 +210,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
//Check if it is already in the cache
if (cacheCheckPush) {
- if (inCache(prefetch)) {
+ if (cache->inCache(prefetch->getAddr())) {
addr++;
delay++;
continue;
@@ -198,7 +218,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
}
//Check if it is already in the miss_queue
- if (inMissQueue(prefetch->getAddr())) {
+ if (cache->inMissQueue(prefetch->getAddr())) {
addr++;
delay++;
continue;
diff --git a/src/mem/cache/prefetch/base_prefetcher.hh b/src/mem/cache/prefetch/base_prefetcher.hh
index 781d3ab09..2780f5e5a 100644
--- a/src/mem/cache/prefetch/base_prefetcher.hh
+++ b/src/mem/cache/prefetch/base_prefetcher.hh
@@ -36,10 +36,13 @@
#ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
-#include "mem/packet.hh"
#include <list>
+#include "base/statistics.hh"
+#include "mem/packet.hh"
+
class BaseCache;
+
class BasePrefetcher
{
protected:
@@ -95,6 +98,10 @@ class BasePrefetcher
void handleMiss(PacketPtr &pkt, Tick time);
+ bool inCache(Addr addr);
+
+ bool inMissQueue(Addr addr);
+
PacketPtr getPacket();
bool havePending()
@@ -106,10 +113,6 @@ class BasePrefetcher
std::list<Addr> &addresses,
std::list<Tick> &delays) = 0;
- virtual bool inCache(PacketPtr &pkt) = 0;
-
- virtual bool inMissQueue(Addr address) = 0;
-
std::list<PacketPtr>::iterator inPrefetch(Addr address);
};
diff --git a/src/mem/cache/prefetch/ghb_prefetcher.cc b/src/mem/cache/prefetch/ghb_prefetcher.cc
index a6c419113..d7d819a2d 100644
--- a/src/mem/cache/prefetch/ghb_prefetcher.cc
+++ b/src/mem/cache/prefetch/ghb_prefetcher.cc
@@ -31,18 +31,44 @@
/**
* @file
- * GHB Prefetcher template instantiations.
+ * GHB Prefetcher implementation.
*/
-#include "mem/cache/tags/cache_tags.hh"
+#include "mem/cache/prefetch/ghb_prefetcher.hh"
+#include "arch/isa_traits.hh"
-#include "mem/cache/tags/lru.hh"
+void
+GHBPrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays)
+{
+ Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
+ int cpuID = pkt->req->getCpuNum();
+ if (!useCPUId) cpuID = 0;
-#include "mem/cache/prefetch/ghb_prefetcher.hh"
-// Template Instantiations
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
+ int new_stride = blkAddr - last_miss_addr[cpuID];
+ int old_stride = last_miss_addr[cpuID] -
+ second_last_miss_addr[cpuID];
-template class GHBPrefetcher<CacheTags<LRU> >;
+ second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
+ last_miss_addr[cpuID] = blkAddr;
-#endif //DOXYGEN_SHOULD_SKIP_THIS
+ if (new_stride == old_stride) {
+ for (int d=1; d <= degree; d++) {
+ Addr newAddr = blkAddr + d * new_stride;
+ if (this->pageStop &&
+ (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+ (newAddr & ~(TheISA::VMPageSize - 1)))
+ {
+ //Spanned the page, so now stop
+ this->pfSpanPage += degree - d + 1;
+ return;
+ }
+ else
+ {
+ addresses.push_back(newAddr);
+ delays.push_back(latency);
+ }
+ }
+ }
+}
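
The rule implemented above: remember the last two miss addresses per CPU, and when the two most recent strides match, prefetch degree blocks ahead along that stride, stopping at a page boundary when pageStop is set. A self-contained sketch of the candidate generation (page_size and the vector return type are simplifications, not the gem5 interface):

    #include <cstdint>
    #include <vector>

    using Addr = uint64_t;

    std::vector<Addr> ghbCandidates(Addr blk_addr, Addr last_miss,
                                    Addr second_last_miss, int degree,
                                    bool page_stop, Addr page_size)
    {
        std::vector<Addr> out;
        int64_t new_stride = static_cast<int64_t>(blk_addr - last_miss);
        int64_t old_stride = static_cast<int64_t>(last_miss - second_last_miss);
        if (new_stride != old_stride)
            return out;                        // strides don't match: no prefetch
        for (int d = 1; d <= degree; ++d) {
            Addr next = blk_addr + static_cast<Addr>(d * new_stride);
            if (page_stop &&
                (blk_addr & ~(page_size - 1)) != (next & ~(page_size - 1)))
                break;                         // do not cross a page boundary
            out.push_back(next);
        }
        return out;
    }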
diff --git a/src/mem/cache/prefetch/ghb_prefetcher.hh b/src/mem/cache/prefetch/ghb_prefetcher.hh
index c558a3e64..f31b56dcf 100644
--- a/src/mem/cache/prefetch/ghb_prefetcher.hh
+++ b/src/mem/cache/prefetch/ghb_prefetcher.hh
@@ -30,31 +30,18 @@
/**
* @file
- * Describes a ghb prefetcher based on template policies.
+ * Describes a GHB prefetcher.
*/
#ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
-#include "base/misc.hh" // fatal, panic, and warn
+#include "mem/cache/prefetch/base_prefetcher.hh"
-#include "mem/cache/prefetch/prefetcher.hh"
-
-/**
- * A template-policy based cache. The behavior of the cache can be altered by
- * supplying different template policies. TagStore handles all tag and data
- * storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
- * @sa MissQueue. Coherence handles all coherence policy details @sa
- * UniCoherence, SimpleMultiCoherence.
- */
-template <class TagStore>
-class GHBPrefetcher : public Prefetcher<TagStore>
+class GHBPrefetcher : public BasePrefetcher
{
protected:
- MissBuffer* mq;
- TagStore* tags;
-
Addr second_last_miss_addr[64/*MAX_CPUS*/];
Addr last_miss_addr[64/*MAX_CPUS*/];
@@ -67,48 +54,16 @@ class GHBPrefetcher : public Prefetcher<TagStore>
GHBPrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData,
Tick latency, int degree, bool useCPUId)
- :Prefetcher<TagStore>(size, pageStop, serialSquash,
- cacheCheckPush, onlyData),
- latency(latency), degree(degree), useCPUId(useCPUId)
+ : BasePrefetcher(size, pageStop, serialSquash,
+ cacheCheckPush, onlyData),
+ latency(latency), degree(degree), useCPUId(useCPUId)
{
}
~GHBPrefetcher() {}
void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays)
- {
- Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
- int cpuID = pkt->req->getCpuNum();
- if (!useCPUId) cpuID = 0;
-
-
- int new_stride = blkAddr - last_miss_addr[cpuID];
- int old_stride = last_miss_addr[cpuID] -
- second_last_miss_addr[cpuID];
-
- second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
- last_miss_addr[cpuID] = blkAddr;
-
- if (new_stride == old_stride) {
- for (int d=1; d <= degree; d++) {
- Addr newAddr = blkAddr + d * new_stride;
- if (this->pageStop &&
- (blkAddr & ~(TheISA::VMPageSize - 1)) !=
- (newAddr & ~(TheISA::VMPageSize - 1)))
- {
- //Spanned the page, so now stop
- this->pfSpanPage += degree - d + 1;
- return;
- }
- else
- {
- addresses.push_back(newAddr);
- delays.push_back(latency);
- }
- }
- }
- }
+ std::list<Tick> &delays);
};
#endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
diff --git a/src/mem/cache/prefetch/stride_prefetcher.cc b/src/mem/cache/prefetch/stride_prefetcher.cc
index 2204871cc..8d957182a 100644
--- a/src/mem/cache/prefetch/stride_prefetcher.cc
+++ b/src/mem/cache/prefetch/stride_prefetcher.cc
@@ -34,15 +34,59 @@
* Stride Prefetcher template instantiations.
*/
-#include "mem/cache/tags/cache_tags.hh"
+#include "mem/cache/prefetch/stride_prefetcher.hh"
-#include "mem/cache/tags/lru.hh"
+void
+StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays)
+{
+// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
+ int cpuID = pkt->req->getCpuNum();
+ if (!useCPUId) cpuID = 0;
-#include "mem/cache/prefetch/stride_prefetcher.hh"
+ /* Scan Table for IAddr Match */
+/* std::list<strideEntry*>::iterator iter;
+ for (iter=table[cpuID].begin();
+ iter !=table[cpuID].end();
+ iter++) {
+ if ((*iter)->IAddr == pkt->pc) break;
+ }
+
+ if (iter != table[cpuID].end()) {
+ //Hit in table
+
+ int newStride = blkAddr - (*iter)->MAddr;
+ if (newStride == (*iter)->stride) {
+ (*iter)->confidence++;
+ }
+ else {
+ (*iter)->stride = newStride;
+ (*iter)->confidence--;
+ }
-// Template Instantiations
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
+ (*iter)->MAddr = blkAddr;
-template class StridePrefetcher<CacheTags<LRU> >;
+ for (int d=1; d <= degree; d++) {
+ Addr newAddr = blkAddr + d * newStride;
+ if (this->pageStop &&
+ (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+ (newAddr & ~(TheISA::VMPageSize - 1)))
+ {
+ //Spanned the page, so now stop
+ this->pfSpanPage += degree - d + 1;
+ return;
+ }
+ else
+ {
+ addresses.push_back(newAddr);
+ delays.push_back(latency);
+ }
+ }
+ }
+ else {
+ //Miss in table
+ //Find lowest confidence and replace
-#endif //DOXYGEN_SHOULD_SKIP_THIS
+ }
+*/
+}
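
The logic left commented out above outlines a PC-indexed stride table with confidence counters: a matching stride raises confidence, a mismatch records the new stride and lowers it. A hypothetical standalone version of that table (map keyed by PC; the threshold and replacement policy are assumptions, not the gem5 implementation):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    using Addr = uint64_t;

    struct StrideEntry {
        Addr lastAddr = 0;      // last miss address seen for this PC
        int64_t stride = 0;     // current stride guess
        int confidence = 0;     // raised on match, lowered on mismatch
    };

    std::vector<Addr> strideCandidates(std::unordered_map<Addr, StrideEntry> &table,
                                       Addr pc, Addr blk_addr, int degree)
    {
        std::vector<Addr> out;
        StrideEntry &e = table[pc];   // creates a fresh entry on a table miss
        int64_t new_stride = static_cast<int64_t>(blk_addr - e.lastAddr);
        if (new_stride == e.stride) {
            ++e.confidence;
        } else {
            e.stride = new_stride;
            --e.confidence;
        }
        e.lastAddr = blk_addr;
        if (e.confidence > 0)
            for (int d = 1; d <= degree; ++d)
                out.push_back(blk_addr + static_cast<Addr>(d * e.stride));
        return out;
    }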
diff --git a/src/mem/cache/prefetch/stride_prefetcher.hh b/src/mem/cache/prefetch/stride_prefetcher.hh
index 57e430400..831e60fb4 100644
--- a/src/mem/cache/prefetch/stride_prefetcher.hh
+++ b/src/mem/cache/prefetch/stride_prefetcher.hh
@@ -30,31 +30,18 @@
/**
* @file
- * Describes a strided prefetcher based on template policies.
+ * Describes a strided prefetcher.
*/
#ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
-#include "base/misc.hh" // fatal, panic, and warn
+#include "mem/cache/prefetch/base_prefetcher.hh"
-#include "mem/cache/prefetch/prefetcher.hh"
-
-/**
- * A template-policy based cache. The behavior of the cache can be altered by
- * supplying different template policies. TagStore handles all tag and data
- * storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
- * @sa MissQueue. Coherence handles all coherence policy details @sa
- * UniCoherence, SimpleMultiCoherence.
- */
-template <class TagStore>
-class StridePrefetcher : public Prefetcher<TagStore>
+class StridePrefetcher : public BasePrefetcher
{
protected:
- MissBuffer* mq;
- TagStore* tags;
-
class strideEntry
{
public:
@@ -84,66 +71,16 @@ class StridePrefetcher : public Prefetcher<TagStore>
StridePrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData,
Tick latency, int degree, bool useCPUId)
- :Prefetcher<TagStore>(size, pageStop, serialSquash,
- cacheCheckPush, onlyData),
- latency(latency), degree(degree), useCPUId(useCPUId)
+ : BasePrefetcher(size, pageStop, serialSquash,
+ cacheCheckPush, onlyData),
+ latency(latency), degree(degree), useCPUId(useCPUId)
{
}
~StridePrefetcher() {}
void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays)
- {
-// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
- int cpuID = pkt->req->getCpuNum();
- if (!useCPUId) cpuID = 0;
-
- /* Scan Table for IAddr Match */
-/* std::list<strideEntry*>::iterator iter;
- for (iter=table[cpuID].begin();
- iter !=table[cpuID].end();
- iter++) {
- if ((*iter)->IAddr == pkt->pc) break;
- }
-
- if (iter != table[cpuID].end()) {
- //Hit in table
-
- int newStride = blkAddr - (*iter)->MAddr;
- if (newStride == (*iter)->stride) {
- (*iter)->confidence++;
- }
- else {
- (*iter)->stride = newStride;
- (*iter)->confidence--;
- }
-
- (*iter)->MAddr = blkAddr;
-
- for (int d=1; d <= degree; d++) {
- Addr newAddr = blkAddr + d * newStride;
- if (this->pageStop &&
- (blkAddr & ~(TheISA::VMPageSize - 1)) !=
- (newAddr & ~(TheISA::VMPageSize - 1)))
- {
- //Spanned the page, so now stop
- this->pfSpanPage += degree - d + 1;
- return;
- }
- else
- {
- addresses.push_back(newAddr);
- delays.push_back(latency);
- }
- }
- }
- else {
- //Miss in table
- //Find lowest confidence and replace
-
- }
-*/ }
+ std::list<Tick> &delays);
};
#endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
diff --git a/src/mem/cache/prefetch/tagged_prefetcher_impl.hh b/src/mem/cache/prefetch/tagged_prefetcher.cc
index b3d4284c7..bc1fa46b9 100644
--- a/src/mem/cache/prefetch/tagged_prefetcher_impl.hh
+++ b/src/mem/cache/prefetch/tagged_prefetcher.cc
@@ -36,20 +36,18 @@
#include "arch/isa_traits.hh"
#include "mem/cache/prefetch/tagged_prefetcher.hh"
-template <class TagStore>
-TaggedPrefetcher<TagStore>::
+TaggedPrefetcher::
TaggedPrefetcher(int size, bool pageStop, bool serialSquash,
bool cacheCheckPush, bool onlyData,
Tick latency, int degree)
- :Prefetcher<TagStore>(size, pageStop, serialSquash,
- cacheCheckPush, onlyData),
- latency(latency), degree(degree)
+ : BasePrefetcher(size, pageStop, serialSquash,
+ cacheCheckPush, onlyData),
+ latency(latency), degree(degree)
{
}
-template <class TagStore>
void
-TaggedPrefetcher<TagStore>::
+TaggedPrefetcher::
calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
std::list<Tick> &delays)
{
diff --git a/src/mem/cache/prefetch/tagged_prefetcher.hh b/src/mem/cache/prefetch/tagged_prefetcher.hh
index dc2aaec50..b9d228aba 100644
--- a/src/mem/cache/prefetch/tagged_prefetcher.hh
+++ b/src/mem/cache/prefetch/tagged_prefetcher.hh
@@ -30,29 +30,18 @@
/**
* @file
- * Describes a tagged prefetcher based on template policies.
+ * Describes a tagged prefetcher.
*/
#ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
#define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
-#include "mem/cache/prefetch/prefetcher.hh"
+#include "mem/cache/prefetch/base_prefetcher.hh"
-/**
- * A template-policy based cache. The behavior of the cache can be altered by
- * supplying different template policies. TagStore handles all tag and data
- * storage @sa TagStore. MissBuffer handles all misses and writes/writebacks
- * @sa MissQueue. Coherence handles all coherence policy details @sa
- * UniCoherence, SimpleMultiCoherence.
- */
-template <class TagStore>
-class TaggedPrefetcher : public Prefetcher<TagStore>
+class TaggedPrefetcher : public BasePrefetcher
{
protected:
- MissBuffer* mq;
- TagStore* tags;
-
Tick latency;
int degree;
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index a58ddaff8..42a1fe34f 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -153,12 +153,9 @@ FALRU::probe(Addr addr) const
}
void
-FALRU::invalidateBlk(Addr addr)
+FALRU::invalidateBlk(FALRU::BlkType *blk)
{
- Addr blkAddr = blkAlign(addr);
- FALRUBlk* blk = (*tagHash.find(blkAddr)).second;
if (blk) {
- assert(blk->tag == blkAddr);
blk->status = 0;
blk->isTouched = false;
tagsInUse--;
@@ -202,44 +199,6 @@ FALRU::findBlock(Addr addr, int &lat, int *inCache)
return blk;
}
-FALRUBlk*
-FALRU::findBlock(PacketPtr &pkt, int &lat, int *inCache)
-{
- Addr addr = pkt->getAddr();
-
- accesses++;
- int tmp_in_cache = 0;
- Addr blkAddr = blkAlign(addr);
- FALRUBlk* blk = hashLookup(blkAddr);
-
- if (blk && blk->isValid()) {
- assert(blk->tag == blkAddr);
- tmp_in_cache = blk->inCache;
- for (int i = 0; i < numCaches; i++) {
- if (1<<i & blk->inCache) {
- hits[i]++;
- } else {
- misses[i]++;
- }
- }
- hits[numCaches]++;
- if (blk != head){
- moveToHead(blk);
- }
- } else {
- blk = NULL;
- for (int i = 0; i < numCaches+1; ++i) {
- misses[i]++;
- }
- }
- if (inCache) {
- *inCache = tmp_in_cache;
- }
-
- lat = hitLatency;
- //assert(check());
- return blk;
-}
FALRUBlk*
FALRU::findBlock(Addr addr) const
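
invalidateBlk() now takes a block pointer instead of an address, so the address-to-block lookup moves to the caller. A simplified stand-in (not the real tag-store classes) showing the new division of labor:

    #include <cstdint>
    #include <unordered_map>

    using Addr = uint64_t;

    struct Blk { bool valid = true; };

    struct TagsSketch {
        std::unordered_map<Addr, Blk> blocks;

        // findBlock() resolves the address...
        Blk *findBlock(Addr addr) {
            auto it = blocks.find(addr);
            return it == blocks.end() ? nullptr : &it->second;
        }
        // ...and invalidateBlk() only mutates the block it is handed.
        void invalidateBlk(Blk *blk) {
            if (blk)
                blk->valid = false;
        }
    };

    void invalidateAddr(TagsSketch &tags, Addr addr) {
        // Callers now do the lookup themselves, as the cache does in its
        // snoop and fill paths.
        tags.invalidateBlk(tags.findBlock(addr));
    }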
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
index 2db89d603..dabbda740 100644
--- a/src/mem/cache/tags/fa_lru.hh
+++ b/src/mem/cache/tags/fa_lru.hh
@@ -173,11 +173,10 @@ public:
bool probe(Addr addr) const;
/**
- * Invalidate the cache block that contains the given addr.
- * @param asid The address space ID.
- * @param addr The address to invalidate.
+ * Invalidate a cache block.
+ * @param blk The block to invalidate.
*/
- void invalidateBlk(Addr addr);
+ void invalidateBlk(BlkType *blk);
/**
* Find the block in the cache and update the replacement data. Returns
@@ -191,16 +190,6 @@ public:
FALRUBlk* findBlock(Addr addr, int &lat, int *inCache = 0);
/**
- * Find the block in the cache and update the replacement data. Returns
- * the access latency and the in cache flags as a side effect
- * @param pkt The req whose block to find
- * @param lat The latency of the access.
- * @param inCache The FALRUBlk::inCache flags.
- * @return Pointer to the cache block.
- */
- FALRUBlk* findBlock(PacketPtr &pkt, int &lat, int *inCache = 0);
-
- /**
* Find the block in the cache, do not update the replacement data.
* @param addr The address to look for.
* @param asid The address space ID.
diff --git a/src/mem/cache/tags/iic.cc b/src/mem/cache/tags/iic.cc
index f4e870659..38f9662ea 100644
--- a/src/mem/cache/tags/iic.cc
+++ b/src/mem/cache/tags/iic.cc
@@ -284,65 +284,6 @@ IIC::findBlock(Addr addr, int &lat)
return tag_ptr;
}
-IICTag*
-IIC::findBlock(PacketPtr &pkt, int &lat)
-{
- Addr addr = pkt->getAddr();
-
- Addr tag = extractTag(addr);
- unsigned set = hash(addr);
- int set_lat;
-
- unsigned long chain_ptr;
-
- if (PROFILE_IIC)
- setAccess.sample(set);
-
- IICTag *tag_ptr = sets[set].findTag(tag, chain_ptr);
- set_lat = 1;
- if (tag_ptr == NULL && chain_ptr != tagNull) {
- int secondary_depth;
- tag_ptr = secondaryChain(tag, chain_ptr, &secondary_depth);
- set_lat += secondary_depth;
- // set depth for statistics fix this later!!! egh
- sets[set].depth = set_lat;
-
- if (tag_ptr != NULL) {
- /* need to move tag into primary table */
- // need to preserve chain: fix this egh
- sets[set].tags[assoc-1]->chain_ptr = tag_ptr->chain_ptr;
- tagSwap(tag_ptr - tagStore, sets[set].tags[assoc-1] - tagStore);
- tag_ptr = sets[set].findTag(tag, chain_ptr);
- assert(tag_ptr!=NULL);
- }
-
- }
- set_lat = set_lat * hashDelay + hitLatency;
- if (tag_ptr != NULL) {
- // IIC replacement: if this is not the first element of
- // list, reorder
- sets[set].moveToHead(tag_ptr);
-
- hitHashDepth.sample(sets[set].depth);
- hashHit++;
- hitDepthTotal += sets[set].depth;
- tag_ptr->status |= BlkReferenced;
- lat = set_lat;
- if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) {
- lat = tag_ptr->whenReady - curTick;
- }
-
- tag_ptr->refCount += 1;
- }
- else {
- // fall through: cache block not found, not a hit...
- missHashDepth.sample(sets[set].depth);
- hashMiss++;
- missDepthTotal += sets[set].depth;
- lat = set_lat;
- }
- return tag_ptr;
-}
IICTag*
IIC::findBlock(Addr addr) const
@@ -695,9 +636,8 @@ IIC::compressBlock(unsigned long index)
}
void
-IIC::invalidateBlk(Addr addr)
+IIC::invalidateBlk(IIC::BlkType *tag_ptr)
{
- IICTag* tag_ptr = findBlock(addr);
if (tag_ptr) {
for (int i = 0; i < tag_ptr->numData; ++i) {
dataReferenceCount[tag_ptr->data_ptr[i]]--;
diff --git a/src/mem/cache/tags/iic.hh b/src/mem/cache/tags/iic.hh
index 92bd6da1d..d0663d330 100644
--- a/src/mem/cache/tags/iic.hh
+++ b/src/mem/cache/tags/iic.hh
@@ -435,11 +435,10 @@ class IIC : public BaseTags
void compressBlock(unsigned long index);
/**
- * Invalidate the block containing the address.
- * @param asid The address space ID.
- * @param addr The address to invalidate.
+ * Invalidate a block.
+ * @param blk The block to invalidate.
*/
- void invalidateBlk(Addr addr);
+ void invalidateBlk(BlkType *blk);
/**
* Find the block and update the replacement data. This call also returns
@@ -452,15 +451,6 @@ class IIC : public BaseTags
IICTag* findBlock(Addr addr, int &lat);
/**
- * Find the block and update the replacement data. This call also returns
- * the access latency as a side effect.
- * @param pkt The req whose block to find
- * @param lat The access latency.
- * @return A pointer to the block found, if any.
- */
- IICTag* findBlock(PacketPtr &pkt, int &lat);
-
- /**
* Find the block, do not update the replacement data.
* @param addr The address to find.
* @param asid The address space ID.
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc
index 31d29aae6..102bb3506 100644
--- a/src/mem/cache/tags/lru.cc
+++ b/src/mem/cache/tags/lru.cc
@@ -183,27 +183,6 @@ LRU::findBlock(Addr addr, int &lat)
return blk;
}
-LRUBlk*
-LRU::findBlock(PacketPtr &pkt, int &lat)
-{
- Addr addr = pkt->getAddr();
-
- Addr tag = extractTag(addr);
- unsigned set = extractSet(addr);
- LRUBlk *blk = sets[set].findBlk(tag);
- lat = hitLatency;
- if (blk != NULL) {
- // move this block to head of the MRU list
- sets[set].moveToHead(blk);
- if (blk->whenReady > curTick
- && blk->whenReady - curTick > hitLatency) {
- lat = blk->whenReady - curTick;
- }
- blk->refCount += 1;
- }
-
- return blk;
-}
LRUBlk*
LRU::findBlock(Addr addr) const
@@ -240,9 +219,8 @@ LRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
}
void
-LRU::invalidateBlk(Addr addr)
+LRU::invalidateBlk(LRU::BlkType *blk)
{
- LRUBlk *blk = findBlock(addr);
if (blk) {
blk->status = 0;
blk->isTouched = false;
diff --git a/src/mem/cache/tags/lru.hh b/src/mem/cache/tags/lru.hh
index fed688283..4b94adca6 100644
--- a/src/mem/cache/tags/lru.hh
+++ b/src/mem/cache/tags/lru.hh
@@ -161,20 +161,10 @@ public:
bool probe(Addr addr) const;
/**
- * Invalidate the block containing the given address.
- * @param asid The address space ID.
- * @param addr The address to invalidate.
- */
- void invalidateBlk(Addr addr);
-
- /**
- * Finds the given address in the cache and update replacement data.
- * Returns the access latency as a side effect.
- * @param pkt The request whose block to find.
- * @param lat The access latency.
- * @return Pointer to the cache block if found.
+ * Invalidate the given block.
+ * @param blk The block to invalidate.
*/
- LRUBlk* findBlock(PacketPtr &pkt, int &lat);
+ void invalidateBlk(BlkType *blk);
/**
* Finds the given address in the cache and updates replacement data.
diff --git a/src/mem/cache/tags/split.cc b/src/mem/cache/tags/split.cc
index bc74f0e0f..5ac87eaba 100644
--- a/src/mem/cache/tags/split.cc
+++ b/src/mem/cache/tags/split.cc
@@ -266,58 +266,6 @@ Split::probe(Addr addr) const
return success;
}
-SplitBlk*
-Split::findBlock(PacketPtr &pkt, int &lat)
-{
-
- Addr aligned = blkAlign(pkt->getAddr());
-
- if (memHash.count(aligned)) {
- memHash[aligned]++;
- } else if (pkt->nic_pkt()) {
- memHash[aligned] = 1;
- }
-
- SplitBlk *blk = lru->findBlock(pkt->getAddr(), lat);
- if (blk) {
- if (pkt->nic_pkt()) {
- NR_CP_hits++;
- } else {
- CR_CP_hits++;
- }
- } else {
- if (lifo && lifo_net) {
- blk = lifo_net->findBlock(pkt->getAddr(), lat);
-
- } else if (lru_net) {
- blk = lru_net->findBlock(pkt->getAddr(), lat);
- }
- if (blk) {
- if (pkt->nic_pkt()) {
- NR_NP_hits++;
- } else {
- CR_NP_hits++;
- }
- }
- }
-
- if (blk) {
- Tick latency = curTick - blk->ts;
- if (blk->isNIC) {
- if (!blk->isUsed && !pkt->nic_pkt()) {
- useByCPUCycleDist.sample(latency);
- nicUseByCPUCycleTotal += latency;
- nicBlksUsedByCPU++;
- }
- }
- blk->isUsed = true;
-
- if (pkt->nic_pkt()) {
- DPRINTF(Split, "found block in partition %d\n", blk->part);
- }
- }
- return blk;
-}
SplitBlk*
Split::findBlock(Addr addr, int &lat)
@@ -403,14 +351,16 @@ Split::findReplacement(PacketPtr &pkt, PacketList &writebacks,
}
void
-Split::invalidateBlk(Addr addr)
+Split::invalidateBlk(Split::BlkType *blk)
{
- SplitBlk *blk = lru->findBlock(addr);
if (!blk) {
+ fatal("FIXME!\n");
+#if 0
if (lifo && lifo_net)
blk = lifo_net->findBlock(addr);
else if (lru_net)
blk = lru_net->findBlock(addr);
+#endif
if (!blk)
return;
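Aside: Split is the one tag store that cannot complete the conversion cleanly. Its old invalidateBlk(Addr) could fall back to re-probing the NIC partitions (lifo_net / lru_net) when the CPU partition missed, and with only a block pointer to work from that fallback is stubbed out behind fatal("FIXME!\n"). In practice the fatal path is avoided by doing the lookup up front through Split::findBlock(), which already consults the other partitions; illustrative only:

    SplitBlk *blk = tags->findBlock(addr);   // probes the CPU partition, then the NIC partitions
    if (blk)
        tags->invalidateBlk(blk);            // non-null, so the FIXME branch is never reached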
diff --git a/src/mem/cache/tags/split.hh b/src/mem/cache/tags/split.hh
index 898d3c7a0..e6ace0921 100644
--- a/src/mem/cache/tags/split.hh
+++ b/src/mem/cache/tags/split.hh
@@ -184,11 +184,10 @@ class Split : public BaseTags
bool probe(Addr addr) const;
/**
- * Invalidate the block containing the given address.
- * @param asid The address space ID.
- * @param addr The address to invalidate.
+ * Invalidate the given block.
+ * @param blk The block to invalidate.
*/
- void invalidateBlk(Addr addr);
+ void invalidateBlk(BlkType *blk);
/**
* Finds the given address in the cache and updates replacement data.
@@ -201,15 +200,6 @@ class Split : public BaseTags
SplitBlk* findBlock(Addr addr, int &lat);
/**
- * Finds the given address in the cache and update replacement data.
- * Returns the access latency as a side effect.
- * @param pkt The memory request whose block to find
- * @param lat The access latency.
- * @return Pointer to the cache block if found.
- */
- SplitBlk* findBlock(PacketPtr &pkt, int &lat);
-
- /**
* Finds the given address in the cache; does not update replacement data.
* @param addr The address to find.
* @param asid The address space ID.
diff --git a/src/mem/cache/tags/split_lifo.cc b/src/mem/cache/tags/split_lifo.cc
index 302e2aaeb..792ff8fa7 100644
--- a/src/mem/cache/tags/split_lifo.cc
+++ b/src/mem/cache/tags/split_lifo.cc
@@ -254,31 +254,6 @@ SplitLIFO::findBlock(Addr addr, int &lat)
return blk;
}
-SplitBlk*
-SplitLIFO::findBlock(PacketPtr &pkt, int &lat)
-{
- Addr addr = pkt->getAddr();
-
- Addr tag = extractTag(addr);
- unsigned set = extractSet(addr);
- SplitBlk *blk = sets[set].findBlk(tag);
-
- if (blk) {
- DPRINTF(Split, "Found LIFO blk %#x in set %d, with tag %#x\n",
- addr, set, tag);
- hits++;
-
- if (twoQueue) {
- blk->isUsed = true;
- sets[set].moveToFirstIn(blk);
- } else {
- sets[set].moveToLastIn(blk);
- }
- }
- lat = hitLatency;
-
- return blk;
-}
SplitBlk*
SplitLIFO::findBlock(Addr addr) const
@@ -335,9 +310,8 @@ SplitLIFO::findReplacement(PacketPtr &pkt, PacketList &writebacks,
}
void
-SplitLIFO::invalidateBlk(Addr addr)
+SplitLIFO::invalidateBlk(SplitLIFO::BlkType *blk)
{
- SplitBlk *blk = findBlock(addr);
if (blk) {
blk->status = 0;
blk->isTouched = false;
diff --git a/src/mem/cache/tags/split_lifo.hh b/src/mem/cache/tags/split_lifo.hh
index 6c3befe37..9001cdb14 100644
--- a/src/mem/cache/tags/split_lifo.hh
+++ b/src/mem/cache/tags/split_lifo.hh
@@ -184,11 +184,10 @@ public:
bool probe( Addr addr) const;
/**
- * Invalidate the block containing the given address.
- * @param asid The address space ID.
- * @param addr The address to invalidate.
+ * Invalidate the given block.
+ * @param blk The block to invalidate.
*/
- void invalidateBlk(Addr addr);
+ void invalidateBlk(BlkType *blk);
/**
* Finds the given address in the cache and updates replacement data.
@@ -201,15 +200,6 @@ public:
SplitBlk* findBlock(Addr addr, int &lat);
/**
- * Finds the given address in the cache and update replacement data.
- * Returns the access latency as a side effect.
- * @param pkt The req whose block to find
- * @param lat The access latency.
- * @return Pointer to the cache block if found.
- */
- SplitBlk* findBlock(PacketPtr &pkt, int &lat);
-
- /**
* Finds the given address in the cache; does not update replacement data.
* @param addr The address to find.
* @param asid The address space ID.
diff --git a/src/mem/cache/tags/split_lru.cc b/src/mem/cache/tags/split_lru.cc
index 11c9a5d64..c37d72cb7 100644
--- a/src/mem/cache/tags/split_lru.cc
+++ b/src/mem/cache/tags/split_lru.cc
@@ -202,27 +202,6 @@ SplitLRU::findBlock(Addr addr, int &lat)
return blk;
}
-SplitBlk*
-SplitLRU::findBlock(PacketPtr &pkt, int &lat)
-{
- Addr addr = pkt->getAddr();
-
- Addr tag = extractTag(addr);
- unsigned set = extractSet(addr);
- SplitBlk *blk = sets[set].findBlk(tag);
- lat = hitLatency;
- if (blk != NULL) {
- // move this block to head of the MRU list
- sets[set].moveToHead(blk);
- if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency){
- lat = blk->whenReady - curTick;
- }
- blk->refCount += 1;
- hits++;
- }
-
- return blk;
-}
SplitBlk*
SplitLRU::findBlock(Addr addr) const
@@ -261,9 +240,8 @@ SplitLRU::findReplacement(PacketPtr &pkt, PacketList &writebacks,
}
void
-SplitLRU::invalidateBlk(Addr addr)
+SplitLRU::invalidateBlk(SplitLRU::BlkType *blk)
{
- SplitBlk *blk = findBlock(addr);
if (blk) {
blk->status = 0;
blk->isTouched = false;
diff --git a/src/mem/cache/tags/split_lru.hh b/src/mem/cache/tags/split_lru.hh
index 6160d59e5..e17a478d3 100644
--- a/src/mem/cache/tags/split_lru.hh
+++ b/src/mem/cache/tags/split_lru.hh
@@ -167,11 +167,10 @@ public:
bool probe(Addr addr) const;
/**
- * Invalidate the block containing the given address.
- * @param asid The address space ID.
- * @param addr The address to invalidate.
+ * Invalidate the given block.
+ * @param blk The block to invalidate.
*/
- void invalidateBlk(Addr addr);
+ void invalidateBlk(BlkType *blk);
/**
* Finds the given address in the cache and updates replacement data.
@@ -184,15 +183,6 @@ public:
SplitBlk* findBlock(Addr addr, int &lat);
/**
- * Finds the given address in the cache and update replacement data.
- * Returns the access latency as a side effect.
- * @param pkt The req whose block to find.
- * @param lat The access latency.
- * @return Pointer to the cache block if found.
- */
- SplitBlk* findBlock(PacketPtr &pkt, int &lat);
-
- /**
* Finds the given address in the cache; does not update replacement data.
* @param addr The address to find.
* @param asid The address space ID.
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 6610e547d..7d616a4e5 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -42,6 +42,7 @@
#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
+#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/eventq.hh"
@@ -203,18 +204,60 @@ PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
if (pkt->req->isLocked()) {
trackLoadLocked(pkt->req);
}
- DPRINTF(MemoryAccess, "Performing Read of size %i on address 0x%x\n",
- pkt->getSize(), pkt->getAddr());
memcpy(pkt->getPtr<uint8_t>(),
pmemAddr + pkt->getAddr() - params()->addrRange.start,
pkt->getSize());
+#if TRACING_ON
+ switch (pkt->getSize()) {
+ case sizeof(uint64_t):
+ DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
+ break;
+ case sizeof(uint32_t):
+ DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
+ break;
+ case sizeof(uint16_t):
+ DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
+ break;
+ case sizeof(uint8_t):
+ DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
+ break;
+ default:
+ DPRINTF(MemoryAccess, "Read of size %i on address 0x%x\n",
+ pkt->getSize(), pkt->getAddr());
+ }
+#endif
}
else if (pkt->isWrite()) {
if (writeOK(pkt->req)) {
- DPRINTF(MemoryAccess, "Performing Write of size %i on address 0x%x\n",
- pkt->getSize(), pkt->getAddr());
memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start,
pkt->getPtr<uint8_t>(), pkt->getSize());
+#if TRACING_ON
+ switch (pkt->getSize()) {
+ case sizeof(uint64_t):
+ DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint64_t>());
+ break;
+ case sizeof(uint32_t):
+ DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint32_t>());
+ break;
+ case sizeof(uint16_t):
+ DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint16_t>());
+ break;
+ case sizeof(uint8_t):
+ DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n",
+ pkt->getSize(), pkt->getAddr(),pkt->get<uint8_t>());
+ break;
+ default:
+ DPRINTF(MemoryAccess, "Write of size %i on address 0x%x\n",
+ pkt->getSize(), pkt->getAddr());
+ }
+#endif
}
}
else if (pkt->isInvalidate()) {
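Aside on the physical-memory tracing: the old one-line read/write DPRINTFs are replaced by a TRACING_ON-guarded switch that also prints the payload, reading it at the width of the access via pkt->get<T>(), which is what the newly added mem/packet_access.hh include provides. The same dispatch appears twice, once per direction; one possible way to factor it into a single helper, sketched under the assumption that it would sit next to doFunctionalAccess() (it is not part of this patch):

    // Hypothetical helper: emit one trace line for a functional access,
    // formatting the payload according to the access size.
    static void
    tracePacket(const char *dir, PacketPtr pkt)
    {
    #if TRACING_ON
        switch (pkt->getSize()) {
          case sizeof(uint64_t):
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x data 0x%x\n",
                    dir, pkt->getSize(), pkt->getAddr(), pkt->get<uint64_t>());
            break;
          case sizeof(uint32_t):
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x data 0x%x\n",
                    dir, pkt->getSize(), pkt->getAddr(), pkt->get<uint32_t>());
            break;
          case sizeof(uint16_t):
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x data 0x%x\n",
                    dir, pkt->getSize(), pkt->getAddr(), pkt->get<uint16_t>());
            break;
          case sizeof(uint8_t):
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x data 0x%x\n",
                    dir, pkt->getSize(), pkt->getAddr(), pkt->get<uint8_t>());
            break;
          default:
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",
                    dir, pkt->getSize(), pkt->getAddr());
        }
    #endif
    }

    // usage inside doFunctionalAccess():
    //   tracePacket("Read", pkt);    after the read memcpy
    //   tracePacket("Write", pkt);   after the write memcpy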