From 6841f863c5dee6ce2028ba647254ec9ad27a57fd Mon Sep 17 00:00:00 2001 From: Ali Saidi Date: Mon, 18 Dec 2006 03:37:52 -0500 Subject: move the twinx loads to the correct opcode and add asis 0x24 and 0x27 Make the TLB ok to translate QUAD_LDD src/arch/sparc/isa/decoder.isa: move the twinx loads to the correct opcode. src/arch/sparc/tlb.cc: Make QUAD_LDD asi ok to execute --HG-- extra : convert_revision : 2a44d1c9e4edb627079fc05776c28d918c8508ce --- src/arch/sparc/isa/decoder.isa | 42 +++++++++++++++++++++++++----------------- src/arch/sparc/tlb.cc | 3 +++ 2 files changed, 28 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/arch/sparc/isa/decoder.isa b/src/arch/sparc/isa/decoder.isa index bbc6a8c4b..e2bebd987 100644 --- a/src/arch/sparc/isa/decoder.isa +++ b/src/arch/sparc/isa/decoder.isa @@ -1060,11 +1060,31 @@ decode OP default Unknown::unknown() 0x10: lduwa({{Rd = Mem.uw;}}, {{EXT_ASI}}); 0x11: lduba({{Rd = Mem.ub;}}, {{EXT_ASI}}); 0x12: lduha({{Rd = Mem.uhw;}}, {{EXT_ASI}}); - 0x13: ldtwa({{ - uint64_t val = Mem.udw; - RdLow = val<31:0>; - RdHigh = val<63:32>; - }}, {{EXT_ASI}}); + 0x13: decode EXT_ASI { + //ASI_QUAD_LDD + 0x24: TwinLoad::ldtx_quad_ldd( + {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); + //ASI_LDTX_REAL + 0x26: TwinLoad::ldtx_real( + {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); + //ASI_LDTX_N + 0x27: TwinLoad::ldtx_n( + {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); + //ASI_LDTX_L + 0x2C: TwinLoad::ldtx_l( + {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); + //ASI_LDTX_REAL_L + 0x2E: TwinLoad::ldtx_real_l( + {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); + //ASI_LDTX_N_L + 0x2F: TwinLoad::ldtx_n_l( + {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); + default: ldtwa({{ + uint64_t val = Mem.udw; + RdLow = val<31:0>; + RdHigh = val<63:32>; + }}, {{EXT_ASI}}); + } } format StoreAlt { 0x14: stwa({{Mem.uw = Rd;}}, {{EXT_ASI}}); @@ -1126,18 +1146,6 @@ decode OP default Unknown::unknown() 0x15: FailUnimpl::lddfa_real_io(); //ASI_REAL_IO_LITTLE 0x1D: FailUnimpl::lddfa_real_io_l(); - //ASI_LDTX_REAL - 0x26: TwinLoad::ldtx_real( - {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); - //ASI_LDTX_N - 0x27: TwinLoad::ldtx_n( - {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); - //ASI_LDTX_REAL_L - 0x2E: TwinLoad::ldtx_real_l( - {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); - //ASI_LDTX_N_L - 0x2F: TwinLoad::ldtx_n_l( - {{RdTwin.udw = Mem.udw}}, {{EXT_ASI}}); //ASI_PRIMARY 0x80: FailUnimpl::lddfa_p(); //ASI_PRIMARY_LITTLE diff --git a/src/arch/sparc/tlb.cc b/src/arch/sparc/tlb.cc index 675287d18..1eb3aa53b 100644 --- a/src/arch/sparc/tlb.cc +++ b/src/arch/sparc/tlb.cc @@ -575,6 +575,9 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write) if (write && asi == ASI_LDTX_P) // block init store (like write hint64) goto continueDtbFlow; + if (!write && asi == ASI_QUAD_LDD) + goto continueDtbFlow; + if (AsiIsTwin(asi)) panic("Twin ASIs not supported\n"); if (AsiIsPartialStore(asi)) -- cgit v1.2.3 From 9b40a13728236f1d1d89d5a2169dab28f537d18c Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Mon, 18 Dec 2006 14:07:52 -0800 Subject: cast chars to int when we want to print integers so we get a number instead of a character --HG-- extra : convert_revision : 7bfa88ba23ad057b751eb01a80416d9f72cfe81a --- src/base/cprintf_formats.hh | 6 +++--- src/unittest/cprintftest.cc | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/base/cprintf_formats.hh b/src/base/cprintf_formats.hh index 58ee7f795..3ea20446d 100644 --- a/src/base/cprintf_formats.hh +++ b/src/base/cprintf_formats.hh @@ -288,13 +288,13 @@ 
format_integer(std::ostream &out, const T &data, Format &fmt) { _format_integer(out, data, fmt); } inline void format_integer(std::ostream &out, char data, Format &fmt) -{ _format_integer(out, data, fmt); } +{ _format_integer(out, (int)data, fmt); } inline void format_integer(std::ostream &out, unsigned char data, Format &fmt) -{ _format_integer(out, data, fmt); } +{ _format_integer(out, (int)data, fmt); } inline void format_integer(std::ostream &out, signed char data, Format &fmt) -{ _format_integer(out, data, fmt); } +{ _format_integer(out, (int)data, fmt); } #if 0 inline void format_integer(std::ostream &out, short data, Format &fmt) diff --git a/src/unittest/cprintftest.cc b/src/unittest/cprintftest.cc index 9c3eb0cd6..a05426356 100644 --- a/src/unittest/cprintftest.cc +++ b/src/unittest/cprintftest.cc @@ -43,6 +43,7 @@ main() char foo[9]; cprintf("%s\n", foo); + cprintf("%d\n", 'A'); cprintf("%shits%%s + %smisses%%s\n", "test", "test"); cprintf("%%s%-10s %c he went home \'\"%d %#o %#x %1.5f %1.2E\n", "hello", 'A', 1, 0xff, 0xfffffffffffffULL, 3.141592653589, 1.1e10); -- cgit v1.2.3 From 0e78386874f15ad28ebc1a0928256e3cf4301008 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Mon, 18 Dec 2006 14:08:42 -0800 Subject: Fix unittest compiles --HG-- extra : convert_revision : 1163437081e1f1eab3f4512d04317dc94a673b9b --- src/unittest/Makefile | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/unittest/Makefile b/src/unittest/Makefile index 1f0584066..0c11b9563 100644 --- a/src/unittest/Makefile +++ b/src/unittest/Makefile @@ -47,48 +47,48 @@ base/traceflags.cc base/traceflags.hh: $(SRCDIR)/base/traceflags.py cd base; \ $(PYTHON) $< -bitvectest: test/bitvectest.cc +bitvectest: unittest/bitvectest.cc $(CXX) $(CCFLAGS) -o $@ $^ -circletest: test/circletest.cc base/circlebuf.cc +circletest: unittest/circletest.cc base/circlebuf.cc $(CXX) $(CCFLAGS) -o $@ $^ -cprintftest: test/cprintftest.cc base/cprintf.cc +cprintftest: unittest/cprintftest.cc base/cprintf.cc $(CXX) $(CCFLAGS) -o $@ $^ -initest: test/initest.cc base/str.cc base/inifile.cc base/cprintf.cc +initest: unittest/initest.cc base/str.cc base/inifile.cc base/cprintf.cc $(CXX) $(CCFLAGS) -o $@ $^ -lrutest: test/lru_test.cc +lrutest: unittest/lru_test.cc $(CXX) $(CCFLAGS) -o $@ $^ -nmtest: test/nmtest.cc base/output.cc base/hostinfo.cc base/cprintf.cc base/misc.cc base/loader/object_file.cc base/loader/symtab.cc base/misc.cc base/str.cc base/loader/aout_object.cc base/loader/ecoff_object.cc base/loader/elf_object.cc +nmtest: unittest/nmtest.cc base/output.cc base/hostinfo.cc base/cprintf.cc base/misc.cc base/loader/object_file.cc base/loader/symtab.cc base/misc.cc base/str.cc base/loader/aout_object.cc base/loader/ecoff_object.cc base/loader/elf_object.cc $(CXX) $(CCFLAGS) -I/n/ziff/z/binkertn/build/work/ALPHA_FS -lelf -o $@ $^ -offtest: test/offtest.cc +offtest: unittest/offtest.cc $(CXX) $(CCFLAGS) -o $@ $^ -rangetest: test/rangetest.cc base/range.cc base/str.cc +rangetest: unittest/rangetest.cc base/range.cc base/str.cc $(CXX) $(CCFLAGS) -o $@ $^ STATTEST+= base/cprintf.cc base/hostinfo.cc base/misc.cc base/mysql.cc STATTEST+= base/python.cc base/str.cc base/time.cc STATTEST+= base/statistics.cc base/stats/mysql.cc base/stats/python.cc STATTEST+= base/stats/statdb.cc base/stats/text.cc base/stats/visit.cc -STATTEST+= test/stattest.cc +STATTEST+= unittest/stattest.cc stattest: $(STATTEST) $(CXX) $(CCFLAGS) $(MYSQL) -o $@ $^ -strnumtest: test/strnumtest.cc base/str.cc 
+strnumtest: unittest/strnumtest.cc base/str.cc $(CXX) $(CCFLAGS) -o $@ $^ -symtest: test/symtest.cc base/misc.cc base/symtab.cc base/str.cc +symtest: unittest/symtest.cc base/misc.cc base/symtab.cc base/str.cc $(CXX) $(CCFLAGS) -o $@ $^ -tokentest: test/tokentest.cc base/str.cc +tokentest: unittest/tokentest.cc base/str.cc $(CXX) $(CCFLAGS) -o $@ $^ -TRACE+=test/tracetest.cc base/trace.cc base/trace_flags.cc base/cprintf.cc +TRACE+=unittest/tracetest.cc base/trace.cc base/trace_flags.cc base/cprintf.cc TRACE+=base/str.cc base/misc.cc tracetest: $(TRACE) $(CXX) $(CCFLAGS) -o $@ $^ -- cgit v1.2.3 From 1428b0de7d3d8903e0674d671829219b554d7aa3 Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Mon, 18 Dec 2006 20:47:12 -0800 Subject: Get rid of generic CacheTags object (fold back into Cache). --HG-- extra : convert_revision : 8769bd8cc358ab3cbbdbbcd909b2e0f1515e09da --- src/SConscript | 1 - src/mem/cache/cache.cc | 22 +- src/mem/cache/cache.hh | 181 ++++++++++- src/mem/cache/cache_builder.cc | 91 +++--- src/mem/cache/cache_impl.hh | 471 ++++++++++++++++++++++++++-- src/mem/cache/prefetch/ghb_prefetcher.cc | 4 +- src/mem/cache/prefetch/stride_prefetcher.cc | 4 +- 7 files changed, 683 insertions(+), 91 deletions(-) (limited to 'src') diff --git a/src/SConscript b/src/SConscript index f54e1de0d..3149f0e59 100644 --- a/src/SConscript +++ b/src/SConscript @@ -118,7 +118,6 @@ base_sources = Split(''' mem/cache/prefetch/stride_prefetcher.cc mem/cache/prefetch/tagged_prefetcher.cc mem/cache/tags/base_tags.cc - mem/cache/tags/cache_tags.cc mem/cache/tags/fa_lru.cc mem/cache/tags/iic.cc mem/cache/tags/lru.cc diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc index 29ca09060..cb4e7f62e 100644 --- a/src/mem/cache/cache.cc +++ b/src/mem/cache/cache.cc @@ -38,8 +38,6 @@ #include "mem/config/cache.hh" -#include "mem/cache/tags/cache_tags.hh" - #if defined(USE_CACHE_LRU) #include "mem/cache/tags/lru.hh" #endif @@ -73,28 +71,28 @@ #if defined(USE_CACHE_FALRU) -template class Cache, SimpleCoherence>; -template class Cache, UniCoherence>; +template class Cache; +template class Cache; #endif #if defined(USE_CACHE_IIC) -template class Cache, SimpleCoherence>; -template class Cache, UniCoherence>; +template class Cache; +template class Cache; #endif #if defined(USE_CACHE_LRU) -template class Cache, SimpleCoherence>; -template class Cache, UniCoherence>; +template class Cache; +template class Cache; #endif #if defined(USE_CACHE_SPLIT) -template class Cache, SimpleCoherence>; -template class Cache, UniCoherence>; +template class Cache; +template class Cache; #endif #if defined(USE_CACHE_SPLIT_LIFO) -template class Cache, SimpleCoherence>; -template class Cache, UniCoherence>; +template class Cache; +template class Cache; #endif #endif //DOXYGEN_SHOULD_SKIP_THIS diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh index 097b0f513..bd88849de 100644 --- a/src/mem/cache/cache.hh +++ b/src/mem/cache/cache.hh @@ -38,10 +38,12 @@ #ifndef __CACHE_HH__ #define __CACHE_HH__ +#include "base/compression/base.hh" #include "base/misc.hh" // fatal, panic, and warn #include "cpu/smt.hh" // SMT_MAX_THREADS #include "mem/cache/base_cache.hh" +#include "mem/cache/cache_blk.hh" #include "mem/cache/miss/miss_buffer.hh" #include "mem/cache/prefetch/prefetcher.hh" @@ -62,6 +64,8 @@ class Cache : public BaseCache public: /** Define the type of cache block to use. */ typedef typename TagStore::BlkType BlkType; + /** A typedef for a list of BlkType pointers. 
*/ + typedef typename TagStore::BlkList BlkList; bool prefetchAccess; @@ -141,6 +145,156 @@ class Cache : public BaseCache PacketPtr invalidatePkt; Request *invalidateReq; + /** + * Policy class for performing compression. + */ + CompressionAlgorithm *compressionAlg; + + /** + * The block size of this cache. Set to value in the Tags object. + */ + const int16_t blkSize; + + /** + * Can this cache should allocate a block on a line-sized write miss. + */ + const bool doFastWrites; + + const bool prefetchMiss; + + /** + * Can the data can be stored in a compressed form. + */ + const bool storeCompressed; + + /** + * Do we need to compress blocks on writebacks (i.e. because + * writeback bus is compressed but storage is not)? + */ + const bool compressOnWriteback; + + /** + * The latency of a compression operation. + */ + const int16_t compLatency; + + /** + * Should we use an adaptive compression scheme. + */ + const bool adaptiveCompression; + + /** + * Do writebacks need to be compressed (i.e. because writeback bus + * is compressed), whether or not they're already compressed for + * storage. + */ + const bool writebackCompressed; + + /** + * Compare the internal block data to the fast access block data. + * @param blk The cache block to check. + * @return True if the data is the same. + */ + bool verifyData(BlkType *blk); + + /** + * Update the internal data of the block. The data to write is assumed to + * be in the fast access data. + * @param blk The block with the data to update. + * @param writebacks A list to store any generated writebacks. + * @param compress_block True if we should compress this block + */ + void updateData(BlkType *blk, PacketList &writebacks, bool compress_block); + + /** + * Handle a replacement for the given request. + * @param blk A pointer to the block, usually NULL + * @param pkt The memory request to satisfy. + * @param new_state The new state of the block. + * @param writebacks A list to store any generated writebacks. + */ + BlkType* doReplacement(BlkType *blk, PacketPtr &pkt, + CacheBlk::State new_state, PacketList &writebacks); + + /** + * Does all the processing necessary to perform the provided request. + * @param pkt The memory request to perform. + * @param lat The latency of the access. + * @param writebacks List for any writebacks that need to be performed. + * @param update True if the replacement data should be updated. + * @return Pointer to the cache block touched by the request. NULL if it + * was a miss. + */ + BlkType* handleAccess(PacketPtr &pkt, int & lat, + PacketList & writebacks, bool update = true); + + /** + * Populates a cache block and handles all outstanding requests for the + * satisfied fill request. This version takes an MSHR pointer and uses its + * request to fill the cache block, while repsonding to its targets. + * @param blk The cache block if it already exists. + * @param mshr The MSHR that contains the fill data and targets to satisfy. + * @param new_state The state of the new cache block. + * @param writebacks List for any writebacks that need to be performed. + * @return Pointer to the new cache block. + */ + BlkType* handleFill(BlkType *blk, MSHR * mshr, CacheBlk::State new_state, + PacketList & writebacks, PacketPtr pkt); + + /** + * Populates a cache block and handles all outstanding requests for the + * satisfied fill request. This version takes two memory requests. One + * contains the fill data, the other is an optional target to satisfy. + * Used for Cache::probe. 
+ * @param blk The cache block if it already exists. + * @param pkt The memory request with the fill data. + * @param new_state The state of the new cache block. + * @param writebacks List for any writebacks that need to be performed. + * @param target The memory request to perform after the fill. + * @return Pointer to the new cache block. + */ + BlkType* handleFill(BlkType *blk, PacketPtr &pkt, + CacheBlk::State new_state, + PacketList & writebacks, PacketPtr target = NULL); + + /** + * Sets the blk to the new state and handles the given request. + * @param blk The cache block being snooped. + * @param new_state The new coherence state for the block. + * @param pkt The request to satisfy + */ + void handleSnoop(BlkType *blk, CacheBlk::State new_state, + PacketPtr &pkt); + + /** + * Sets the blk to the new state. + * @param blk The cache block being snooped. + * @param new_state The new coherence state for the block. + */ + void handleSnoop(BlkType *blk, CacheBlk::State new_state); + + /** + * Create a writeback request for the given block. + * @param blk The block to writeback. + * @return The writeback request for the block. + */ + PacketPtr writebackBlk(BlkType *blk); + + BlkType* findBlock(Addr addr) + { + return tags->findBlock(addr); + } + + BlkType* findBlock(PacketPtr &pkt) + { + return tags->findBlock(pkt->getAddr()); + } + + void invalidateBlk(CacheBlk *blk) + { + tags->invalidateBlk(tags->regenerateBlkAddr(blk->tag, blk->set)); + } + public: class Params @@ -153,15 +307,38 @@ class Cache : public BaseCache Prefetcher *prefetcher; bool prefetchAccess; int hitLatency; + CompressionAlgorithm *compressionAlg; + const int16_t blkSize; + const bool doFastWrites; + const bool prefetchMiss; + const bool storeCompressed; + const bool compressOnWriteback; + const int16_t compLatency; + const bool adaptiveCompression; + const bool writebackCompressed; Params(TagStore *_tags, MissBuffer *mq, Coherence *coh, BaseCache::Params params, Prefetcher *_prefetcher, - bool prefetch_access, int hit_latency) + bool prefetch_access, int hit_latency, + bool do_fast_writes, + bool store_compressed, bool adaptive_compression, + bool writeback_compressed, + CompressionAlgorithm *_compressionAlg, int comp_latency, + bool prefetch_miss) : tags(_tags), missQueue(mq), coherence(coh), baseParams(params), prefetcher(_prefetcher), prefetchAccess(prefetch_access), - hitLatency(hit_latency) + hitLatency(hit_latency), + compressionAlg(_compressionAlg), + blkSize(_tags->getBlockSize()), + doFastWrites(do_fast_writes), + prefetchMiss(prefetch_miss), + storeCompressed(store_compressed), + compressOnWriteback(!store_compressed && writeback_compressed), + compLatency(comp_latency), + adaptiveCompression(adaptive_compression), + writebackCompressed(writeback_compressed) { } }; diff --git a/src/mem/cache/cache_builder.cc b/src/mem/cache/cache_builder.cc index 4b1b19718..e212650f2 100644 --- a/src/mem/cache/cache_builder.cc +++ b/src/mem/cache/cache_builder.cc @@ -70,9 +70,6 @@ #include "base/compression/null_compression.hh" #include "base/compression/lzss_compression.hh" -// CacheTags Templates -#include "mem/cache/tags/cache_tags.hh" - // MissQueue Templates #include "mem/cache/miss/miss_queue.hh" #include "mem/cache/miss/blocking_buffer.hh" @@ -108,8 +105,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache) Param tgts_per_mshr; Param write_buffers; Param prioritizeRequests; -// SimObjectParam in_bus; -// SimObjectParam out_bus; SimObjectParam protocol; Param trace_addr; Param hash_delay; @@ -122,7 +117,6 @@ 
BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache) Param compression_latency; Param subblock_size; Param max_miss_count; -// SimObjectParam hier; VectorParam > addr_range; // SimObjectParam mem_trace; Param split; @@ -156,9 +150,6 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache) INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8), INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first", false), -/* INIT_PARAM_DFLT(in_bus, "incoming bus object", NULL), - INIT_PARAM(out_bus, "outgoing bus object"), -*/ INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL), INIT_PARAM_DFLT(trace_addr, "address to trace", 0), @@ -182,10 +173,6 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache) INIT_PARAM_DFLT(max_miss_count, "The number of misses to handle before calling exit", 0), -/* INIT_PARAM_DFLT(hier, - "Hierarchy global variables", - &defaultHierParams), -*/ INIT_PARAM_DFLT(addr_range, "The address range in bytes", vector >(1,RangeIn((Addr)0, MaxAddr))), // INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL), @@ -208,47 +195,47 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache) END_INIT_SIM_OBJECT_PARAMS(BaseCache) -#define BUILD_CACHE(t, c) do { \ - Prefetcher > *pf; \ - if (pf_policy == "tagged") { \ - BUILD_TAGGED_PREFETCHER(t); \ - } \ - else if (pf_policy == "stride") { \ - BUILD_STRIDED_PREFETCHER(t); \ - } \ - else if (pf_policy == "ghb") { \ - BUILD_GHB_PREFETCHER(t); \ - } \ - else { \ - BUILD_NULL_PREFETCHER(t); \ - } \ - Cache, c>::Params params(tagStore, mq, coh, \ - base_params, \ - pf, \ - prefetch_access, hit_latency); \ - Cache, c> *retval = \ - new Cache, c>(getInstanceName(), params); \ -return retval; \ +#define BUILD_CACHE(TAGS, tags, c) \ + do { \ + Prefetcher *pf; \ + if (pf_policy == "tagged") { \ + BUILD_TAGGED_PREFETCHER(TAGS); \ + } \ + else if (pf_policy == "stride") { \ + BUILD_STRIDED_PREFETCHER(TAGS); \ + } \ + else if (pf_policy == "ghb") { \ + BUILD_GHB_PREFETCHER(TAGS); \ + } \ + else { \ + BUILD_NULL_PREFETCHER(TAGS); \ + } \ + Cache::Params params(tags, mq, coh, base_params, \ + pf, prefetch_access, hit_latency, \ + true, \ + store_compressed, \ + adaptive_compression, \ + compressed_bus, \ + compAlg, compression_latency, \ + prefetch_miss); \ + Cache *retval = \ + new Cache(getInstanceName(), params); \ + return retval; \ } while (0) #define BUILD_CACHE_PANIC(x) do { \ panic("%s not compiled into M5", x); \ } while (0) -#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \ - do { \ - CompressionAlgorithm *compAlg; \ - if (compressed_bus || store_compressed) { \ - compAlg = new LZSSCompression(); \ - } else { \ - compAlg = new NullCompression(); \ - } \ - CacheTags *tagStore = \ - new CacheTags(tags, compression_latency, true, \ - store_compressed, adaptive_compression, \ - compressed_bus, \ - compAlg, prefetch_miss); \ - BUILD_CACHE(TAGS, c); \ +#define BUILD_COMPRESSED_CACHE(TAGS, tags, c) \ + do { \ + CompressionAlgorithm *compAlg; \ + if (compressed_bus || store_compressed) { \ + compAlg = new LZSSCompression(); \ + } else { \ + compAlg = new NullCompression(); \ + } \ + BUILD_CACHE(TAGS, tags, c); \ } while (0) #if defined(USE_CACHE_FALRU) @@ -328,7 +315,7 @@ return retval; \ #if defined(USE_TAGGED) #define BUILD_TAGGED_PREFETCHER(t) pf = new \ - TaggedPrefetcher >(prefetcher_size, \ + TaggedPrefetcher(prefetcher_size, \ !prefetch_past_page, \ prefetch_serial_squash, \ prefetch_cache_check_push, \ @@ -341,7 +328,7 @@ return retval; \ #if defined(USE_STRIDED) #define BUILD_STRIDED_PREFETCHER(t) pf = new \ - StridePrefetcher 
>(prefetcher_size, \ + StridePrefetcher(prefetcher_size, \ !prefetch_past_page, \ prefetch_serial_squash, \ prefetch_cache_check_push, \ @@ -355,7 +342,7 @@ return retval; \ #if defined(USE_GHB) #define BUILD_GHB_PREFETCHER(t) pf = new \ - GHBPrefetcher >(prefetcher_size, \ + GHBPrefetcher(prefetcher_size, \ !prefetch_past_page, \ prefetch_serial_squash, \ prefetch_cache_check_push, \ @@ -369,7 +356,7 @@ return retval; \ #if defined(USE_TAGGED) #define BUILD_NULL_PREFETCHER(t) pf = new \ - TaggedPrefetcher >(prefetcher_size, \ + TaggedPrefetcher(prefetcher_size, \ !prefetch_past_page, \ prefetch_serial_squash, \ prefetch_cache_check_push, \ diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 9f48ff1f3..1d78b03c7 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -72,10 +72,18 @@ Cache(const std::string &_name, prefetchAccess(params.prefetchAccess), tags(params.tags), missQueue(params.missQueue), coherence(params.coherence), prefetcher(params.prefetcher), - hitLatency(params.hitLatency) + hitLatency(params.hitLatency), + compressionAlg(params.compressionAlg), + blkSize(params.blkSize), + doFastWrites(params.doFastWrites), + prefetchMiss(params.prefetchMiss), + storeCompressed(params.storeCompressed), + compressOnWriteback(params.compressOnWriteback), + compLatency(params.compLatency), + adaptiveCompression(params.adaptiveCompression), + writebackCompressed(params.writebackCompressed) { tags->setCache(this); - tags->setPrefetcher(prefetcher); missQueue->setCache(this); missQueue->setPrefetcher(prefetcher); coherence->setCache(this); @@ -97,6 +105,433 @@ Cache::regStats() prefetcher->regStats(name()); } +template +typename Cache::BlkType* +Cache::handleAccess(PacketPtr &pkt, int & lat, + PacketList & writebacks, bool update) +{ + // Set the block offset here + int offset = tags->extractBlkOffset(pkt->getAddr()); + + BlkType *blk = NULL; + if (update) { + blk = tags->findBlock(pkt, lat); + } else { + blk = tags->findBlock(pkt->getAddr()); + lat = 0; + } + if (blk != NULL) { + + if (!update) { + if (pkt->isWrite()){ + assert(offset < blkSize); + assert(pkt->getSize() <= blkSize); + assert(offset+pkt->getSize() <= blkSize); + memcpy(blk->data + offset, pkt->getPtr(), + pkt->getSize()); + } else if (!(pkt->flags & SATISFIED)) { + pkt->flags |= SATISFIED; + pkt->result = Packet::Success; + assert(offset < blkSize); + assert(pkt->getSize() <= blkSize); + assert(offset + pkt->getSize() <=blkSize); + memcpy(pkt->getPtr(), blk->data + offset, + pkt->getSize()); + } + return blk; + } + + // Hit + if (blk->isPrefetch()) { + //Signal that this was a hit under prefetch (no need for + //use prefetch (only can get here if true) + DPRINTF(HWPrefetch, "Hit a block that was prefetched\n"); + blk->status &= ~BlkHWPrefetched; + if (prefetchMiss) { + //If we are using the miss stream, signal the + //prefetcher otherwise the access stream would have + //already signaled this hit + prefetcher->handleMiss(pkt, curTick); + } + } + + if ((pkt->isWrite() && blk->isWritable()) || + (pkt->isRead() && blk->isValid())) { + + // We are satisfying the request + pkt->flags |= SATISFIED; + + if (blk->isCompressed()) { + // If the data is compressed, need to increase the latency + lat += (compLatency/4); + } + + bool write_data = false; + + assert(verifyData(blk)); + + assert(offset < blkSize); + assert(pkt->getSize() <= blkSize); + assert(offset+pkt->getSize() <= blkSize); + + if (pkt->isWrite()) { + if (blk->checkWrite(pkt->req)) { + write_data = true; + blk->status |= 
BlkDirty; + memcpy(blk->data + offset, pkt->getPtr(), + pkt->getSize()); + } + } else { + assert(pkt->isRead()); + if (pkt->req->isLocked()) { + blk->trackLoadLocked(pkt->req); + } + memcpy(pkt->getPtr(), blk->data + offset, + pkt->getSize()); + } + + if (write_data || + (adaptiveCompression && blk->isCompressed())) + { + // If we wrote data, need to update the internal block + // data. + updateData(blk, writebacks, + !(adaptiveCompression && + blk->isReferenced())); + } + } else { + // permission violation, treat it as a miss + blk = NULL; + } + } else { + // complete miss (no matching block) + if (pkt->req->isLocked() && pkt->isWrite()) { + // miss on store conditional... just give up now + pkt->req->setScResult(0); + pkt->flags |= SATISFIED; + } + } + + return blk; +} + +template +typename Cache::BlkType* +Cache::handleFill(BlkType *blk, PacketPtr &pkt, + CacheBlk::State new_state, + PacketList & writebacks, + PacketPtr target) +{ +#ifndef NDEBUG + BlkType *tmp_blk = findBlock(pkt->getAddr()); + assert(tmp_blk == blk); +#endif + blk = doReplacement(blk, pkt, new_state, writebacks); + + + if (pkt->isRead()) { + memcpy(blk->data, pkt->getPtr(), blkSize); + } + + blk->whenReady = pkt->finishTime; + + // Respond to target, if any + if (target) { + + target->flags |= SATISFIED; + + if (target->cmd == Packet::InvalidateReq) { + invalidateBlk(blk); + blk = NULL; + } + + if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) { + assert(target->isWrite() || target->isRead()); + assert(target->getOffset(blkSize) + target->getSize() <= blkSize); + if (target->isWrite()) { + if (blk->checkWrite(pkt->req)) { + blk->status |= BlkDirty; + memcpy(blk->data + target->getOffset(blkSize), + target->getPtr(), target->getSize()); + } + } else { + if (pkt->req->isLocked()) { + blk->trackLoadLocked(pkt->req); + } + memcpy(target->getPtr(), + blk->data + target->getOffset(blkSize), + target->getSize()); + } + } + } + + if (blk) { + // Need to write the data into the block + updateData(blk, writebacks, !adaptiveCompression || true); + } + return blk; +} + +template +typename Cache::BlkType* +Cache::handleFill(BlkType *blk, MSHR * mshr, + CacheBlk::State new_state, + PacketList & writebacks, PacketPtr pkt) +{ +/* +#ifndef NDEBUG + BlkType *tmp_blk = findBlock(mshr->pkt->getAddr()); + assert(tmp_blk == blk); +#endif + PacketPtr pkt = mshr->pkt;*/ + blk = doReplacement(blk, pkt, new_state, writebacks); + + if (pkt->isRead()) { + memcpy(blk->data, pkt->getPtr(), blkSize); + } + + blk->whenReady = pkt->finishTime; + + + // respond to MSHR targets, if any + + // First offset for critical word first calculations + int initial_offset = 0; + + if (mshr->hasTargets()) { + initial_offset = mshr->getTarget()->getOffset(blkSize); + } + + while (mshr->hasTargets()) { + PacketPtr target = mshr->getTarget(); + + target->flags |= SATISFIED; + + // How many bytes pass the first request is this one + int transfer_offset = target->getOffset(blkSize) - initial_offset; + if (transfer_offset < 0) { + transfer_offset += blkSize; + } + + // If critical word (no offset) return first word time + Tick completion_time = tags->getHitLatency() + + transfer_offset ? 
pkt->finishTime : pkt->firstWordTime; + + if (target->cmd == Packet::InvalidateReq) { + //Mark the blk as invalid now, if it hasn't been already + if (blk) { + invalidateBlk(blk); + blk = NULL; + } + + //Also get rid of the invalidate + mshr->popTarget(); + + DPRINTF(Cache, "Popping off a Invalidate for addr %x\n", + pkt->getAddr()); + + continue; + } + + if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) { + assert(target->isWrite() || target->isRead()); + assert(target->getOffset(blkSize) + target->getSize() <= blkSize); + if (target->isWrite()) { + if (blk->checkWrite(pkt->req)) { + blk->status |= BlkDirty; + memcpy(blk->data + target->getOffset(blkSize), + target->getPtr(), target->getSize()); + } + } else { + if (pkt->req->isLocked()) { + blk->trackLoadLocked(pkt->req); + } + memcpy(target->getPtr(), + blk->data + target->getOffset(blkSize), + target->getSize()); + } + } else { + // Invalid access, need to do another request + // can occur if block is invalidated, or not correct + // permissions +// mshr->pkt = pkt; + break; + } + respondToMiss(target, completion_time); + mshr->popTarget(); + } + + if (blk) { + // Need to write the data into the block + updateData(blk, writebacks, !adaptiveCompression || true); + } + + return blk; +} + + +template +void +Cache::handleSnoop(BlkType *blk, + CacheBlk::State new_state, + PacketPtr &pkt) +{ + //Must have the block to supply + assert(blk); + // Can only supply data, and if it hasn't already been supllied + assert(pkt->isRead()); + assert(!(pkt->flags & SATISFIED)); + pkt->flags |= SATISFIED; + Addr offset = pkt->getOffset(blkSize); + assert(offset < blkSize); + assert(pkt->getSize() <= blkSize); + assert(offset + pkt->getSize() <=blkSize); + memcpy(pkt->getPtr(), blk->data + offset, pkt->getSize()); + + handleSnoop(blk, new_state); +} + +template +void +Cache::handleSnoop(BlkType *blk, + CacheBlk::State new_state) +{ + if (blk && blk->status != new_state) { + if ((new_state && BlkValid) == 0) { + invalidateBlk(blk); + } else { + assert(new_state >= 0 && new_state < 128); + blk->status = new_state; + } + } +} + +template +PacketPtr +Cache::writebackBlk(BlkType *blk) +{ + assert(blk && blk->isValid() && blk->isModified()); + int data_size = blkSize; + data_size = blk->size; + if (compressOnWriteback) { + // not already compressed + // need to compress to ship it + assert(data_size == blkSize); + uint8_t *tmp_data = new uint8_t[blkSize]; + data_size = compressionAlg->compress(tmp_data,blk->data, + data_size); + delete [] tmp_data; + } + +/* PacketPtr writeback = + buildWritebackReq(tags->regenerateBlkAddr(blk->tag, blk->set), + blk->asid, blkSize, + blk->data, data_size); +*/ + + Request *writebackReq = + new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0); + PacketPtr writeback = new Packet(writebackReq, Packet::Writeback, -1); + writeback->allocate(); + memcpy(writeback->getPtr(),blk->data,blkSize); + + blk->status &= ~BlkDirty; + return writeback; +} + + +template +bool +Cache::verifyData(BlkType *blk) +{ + bool retval; + // The data stored in the blk + uint8_t *blk_data = new uint8_t[blkSize]; + tags->readData(blk, blk_data); + // Pointer for uncompressed data, assumed uncompressed + uint8_t *tmp_data = blk_data; + // The size of the data being stored, assumed uncompressed + int data_size = blkSize; + + // If the block is compressed need to uncompress to access + if (blk->isCompressed()){ + // Allocate new storage for the data + tmp_data = new uint8_t[blkSize]; + data_size = 
compressionAlg->uncompress(tmp_data,blk_data, blk->size); + assert(data_size == blkSize); + // Don't need to keep blk_data around + delete [] blk_data; + } else { + assert(blkSize == blk->size); + } + + retval = memcmp(tmp_data, blk->data, blkSize) == 0; + delete [] tmp_data; + return retval; +} + +template +void +Cache::updateData(BlkType *blk, PacketList &writebacks, + bool compress_block) +{ + if (storeCompressed && compress_block) { + uint8_t *comp_data = new uint8_t[blkSize]; + int new_size = compressionAlg->compress(comp_data, blk->data, blkSize); + if (new_size > (blkSize - tags->getSubBlockSize())){ + // no benefit to storing it compressed + blk->status &= ~BlkCompressed; + tags->writeData(blk, blk->data, blkSize, + writebacks); + } else { + // Store the data compressed + blk->status |= BlkCompressed; + tags->writeData(blk, comp_data, new_size, + writebacks); + } + delete [] comp_data; + } else { + blk->status &= ~BlkCompressed; + tags->writeData(blk, blk->data, blkSize, writebacks); + } +} + +template +typename Cache::BlkType* +Cache::doReplacement(BlkType *blk, PacketPtr &pkt, + CacheBlk::State new_state, + PacketList &writebacks) +{ + if (blk == NULL) { + // need to do a replacement + BlkList compress_list; + blk = tags->findReplacement(pkt, writebacks, compress_list); + while (adaptiveCompression && !compress_list.empty()) { + updateData(compress_list.front(), writebacks, true); + compress_list.pop_front(); + } + if (blk->isValid()) { + DPRINTF(Cache, "replacement: replacing %x with %x: %s\n", + tags->regenerateBlkAddr(blk->tag,blk->set), pkt->getAddr(), + (blk->isModified()) ? "writeback" : "clean"); + + if (blk->isModified()) { + // Need to write the data back + writebacks.push_back(writebackBlk(blk)); + } + } + blk->tag = tags->extractTag(pkt->getAddr(), blk); + } else { + // must be a status change + // assert(blk->status != new_state); + if (blk->status == new_state) warn("Changing state to same value\n"); + } + + blk->status = new_state; + return blk; +} + + template bool Cache::access(PacketPtr &pkt) @@ -112,7 +547,7 @@ Cache::access(PacketPtr &pkt) prefetcher->handleMiss(pkt, curTick); } if (!pkt->req->isUncacheable()) { - blk = tags->handleAccess(pkt, lat, writebacks); + blk = handleAccess(pkt, lat, writebacks); } else { size = pkt->getSize(); } @@ -130,7 +565,7 @@ Cache::access(PacketPtr &pkt) warn("WriteInv doing a fastallocate" "with an outstanding miss to the same address\n"); } - blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable, + blk = handleFill(NULL, pkt, BlkValid | BlkWritable, writebacks); ++fastWrites; } @@ -195,7 +630,7 @@ Cache::getPacket() if (!pkt->req->isUncacheable()) { if (pkt->cmd == Packet::HardPFReq) misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++; - BlkType *blk = tags->findBlock(pkt); + BlkType *blk = findBlock(pkt); Packet::Command cmd = coherence->getBusCmd(pkt->cmd, (blk)? blk->status : 0); missQueue->setBusCmd(pkt, cmd); @@ -224,7 +659,7 @@ Cache::sendResult(PacketPtr &pkt, MSHR* mshr, if (upgrade) { assert(pkt); //Upgrades need to be fixed pkt->flags &= ~CACHE_LINE_FILL; - BlkType *blk = tags->findBlock(pkt); + BlkType *blk = findBlock(pkt); CacheBlk::State old_state = (blk) ? 
blk->status : 0; CacheBlk::State new_state = coherence->getNewState(pkt,old_state); if (old_state != new_state) @@ -233,7 +668,7 @@ Cache::sendResult(PacketPtr &pkt, MSHR* mshr, //Set the state on the upgrade memcpy(pkt->getPtr(), blk->data, blkSize); PacketList writebacks; - tags->handleFill(blk, mshr, new_state, writebacks, pkt); + handleFill(blk, mshr, new_state, writebacks, pkt); assert(writebacks.empty()); missQueue->handleResponse(pkt, curTick + hitLatency); } @@ -275,7 +710,7 @@ Cache::handleResponse(PacketPtr &pkt) if (pkt->isCacheFill() && !pkt->isNoAllocate()) { DPRINTF(Cache, "Block for addr %x being updated in Cache\n", pkt->getAddr()); - blk = tags->findBlock(pkt); + blk = findBlock(pkt); CacheBlk::State old_state = (blk) ? blk->status : 0; PacketList writebacks; CacheBlk::State new_state = coherence->getNewState(pkt,old_state); @@ -284,7 +719,7 @@ Cache::handleResponse(PacketPtr &pkt) "state %i to %i\n", pkt->getAddr(), old_state, new_state); - blk = tags->handleFill(blk, (MSHR*)pkt->senderState, + blk = handleFill(blk, (MSHR*)pkt->senderState, new_state, writebacks, pkt); while (!writebacks.empty()) { missQueue->doWriteback(writebacks.front()); @@ -332,7 +767,7 @@ Cache::snoop(PacketPtr &pkt) } Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1)); - BlkType *blk = tags->findBlock(pkt); + BlkType *blk = findBlock(pkt); MSHR *mshr = missQueue->findMSHR(blk_addr); if (coherence->hasProtocol() || pkt->isInvalidate()) { //@todo Move this into handle bus req @@ -435,7 +870,7 @@ Cache::snoop(PacketPtr &pkt) "now supplying data, new state is %i\n", pkt->cmdString(), blk_addr, new_state); - tags->handleSnoop(blk, new_state, pkt); + handleSnoop(blk, new_state, pkt); respondToSnoop(pkt, curTick + hitLatency); return; } @@ -443,7 +878,7 @@ Cache::snoop(PacketPtr &pkt) DPRINTF(Cache, "Cache snooped a %s request for addr %x, " "new state is %i\n", pkt->cmdString(), blk_addr, new_state); - tags->handleSnoop(blk, new_state); + handleSnoop(blk, new_state); } template @@ -501,7 +936,7 @@ Cache::probe(PacketPtr &pkt, bool update, PacketList writebacks; int lat; - BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update); + BlkType *blk = handleAccess(pkt, lat, writebacks, update); DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(), (blk) ? "hit" : "miss"); @@ -557,7 +992,7 @@ Cache::probe(PacketPtr &pkt, bool update, if (!pkt->req->isUncacheable() /*Uncacheables just go through*/ && (pkt->cmd != Packet::Writeback)/*Writebacks on miss fall through*/) { // Fetch the cache block to fill - BlkType *blk = tags->findBlock(pkt); + BlkType *blk = findBlock(pkt); Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd, (blk)? 
blk->status : 0); @@ -593,7 +1028,7 @@ return 0; DPRINTF(Cache, "Block for blk addr %x moving from state " "%i to %i\n", busPkt->getAddr(), old_state, new_state); - tags->handleFill(blk, busPkt, new_state, writebacks, pkt); + handleFill(blk, busPkt, new_state, writebacks, pkt); //Free the packet delete busPkt; @@ -639,7 +1074,7 @@ Cache::snoopProbe(PacketPtr &pkt) } Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1)); - BlkType *blk = tags->findBlock(pkt); + BlkType *blk = findBlock(pkt); MSHR *mshr = missQueue->findMSHR(blk_addr); CacheBlk::State new_state = 0; bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state); @@ -648,14 +1083,14 @@ Cache::snoopProbe(PacketPtr &pkt) "now supplying data, new state is %i\n", pkt->cmdString(), blk_addr, new_state); - tags->handleSnoop(blk, new_state, pkt); + handleSnoop(blk, new_state, pkt); return hitLatency; } if (blk) DPRINTF(Cache, "Cache snooped a %s request for addr %x, " "new state is %i\n", pkt->cmdString(), blk_addr, new_state); - tags->handleSnoop(blk, new_state); + handleSnoop(blk, new_state); return 0; } diff --git a/src/mem/cache/prefetch/ghb_prefetcher.cc b/src/mem/cache/prefetch/ghb_prefetcher.cc index a6c419113..7d537641e 100644 --- a/src/mem/cache/prefetch/ghb_prefetcher.cc +++ b/src/mem/cache/prefetch/ghb_prefetcher.cc @@ -34,8 +34,6 @@ * GHB Prefetcher template instantiations. */ -#include "mem/cache/tags/cache_tags.hh" - #include "mem/cache/tags/lru.hh" #include "mem/cache/prefetch/ghb_prefetcher.hh" @@ -43,6 +41,6 @@ // Template Instantiations #ifndef DOXYGEN_SHOULD_SKIP_THIS -template class GHBPrefetcher >; +template class GHBPrefetcher; #endif //DOXYGEN_SHOULD_SKIP_THIS diff --git a/src/mem/cache/prefetch/stride_prefetcher.cc b/src/mem/cache/prefetch/stride_prefetcher.cc index 2204871cc..847f2979e 100644 --- a/src/mem/cache/prefetch/stride_prefetcher.cc +++ b/src/mem/cache/prefetch/stride_prefetcher.cc @@ -34,8 +34,6 @@ * Stride Prefetcher template instantiations. */ -#include "mem/cache/tags/cache_tags.hh" - #include "mem/cache/tags/lru.hh" #include "mem/cache/prefetch/stride_prefetcher.hh" @@ -43,6 +41,6 @@ // Template Instantiations #ifndef DOXYGEN_SHOULD_SKIP_THIS -template class StridePrefetcher >; +template class StridePrefetcher; #endif //DOXYGEN_SHOULD_SKIP_THIS -- cgit v1.2.3 From f655932700dbe8d39ee618a2679cb43d2c41eaa1 Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Mon, 18 Dec 2006 21:53:06 -0800 Subject: No need to template prefetcher on cache TagStore type. 
--HG-- rename : src/mem/cache/prefetch/tagged_prefetcher_impl.hh => src/mem/cache/prefetch/tagged_prefetcher.cc extra : convert_revision : 56c0b51e424a3a6590332dba4866e69a1ad19598 --- src/SConscript | 1 - src/mem/cache/base_cache.hh | 4 ++ src/mem/cache/cache.hh | 17 ++++-- src/mem/cache/cache_builder.cc | 70 ++++++++++----------- src/mem/cache/cache_impl.hh | 4 +- src/mem/cache/prefetch/base_prefetcher.cc | 26 +++++++- src/mem/cache/prefetch/base_prefetcher.hh | 13 ++-- src/mem/cache/prefetch/ghb_prefetcher.cc | 42 ++++++++++--- src/mem/cache/prefetch/ghb_prefetcher.hh | 59 +++--------------- src/mem/cache/prefetch/stride_prefetcher.cc | 58 ++++++++++++++++-- src/mem/cache/prefetch/stride_prefetcher.hh | 77 +++--------------------- src/mem/cache/prefetch/tagged_prefetcher.cc | 74 +++++++++++++++++++++++ src/mem/cache/prefetch/tagged_prefetcher.hh | 17 +----- src/mem/cache/prefetch/tagged_prefetcher_impl.hh | 76 ----------------------- 14 files changed, 261 insertions(+), 277 deletions(-) create mode 100644 src/mem/cache/prefetch/tagged_prefetcher.cc delete mode 100644 src/mem/cache/prefetch/tagged_prefetcher_impl.hh (limited to 'src') diff --git a/src/SConscript b/src/SConscript index 3149f0e59..178f5b345 100644 --- a/src/SConscript +++ b/src/SConscript @@ -114,7 +114,6 @@ base_sources = Split(''' mem/cache/miss/mshr_queue.cc mem/cache/prefetch/base_prefetcher.cc mem/cache/prefetch/ghb_prefetcher.cc - mem/cache/prefetch/prefetcher.cc mem/cache/prefetch/stride_prefetcher.cc mem/cache/prefetch/tagged_prefetcher.cc mem/cache/tags/base_tags.cc diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh index a7f035dce..c10d98e8e 100644 --- a/src/mem/cache/base_cache.hh +++ b/src/mem/cache/base_cache.hh @@ -692,6 +692,10 @@ class BaseCache : public MemObject } return true; } + + virtual bool inCache(Addr addr) = 0; + + virtual bool inMissQueue(Addr addr) = 0; }; #endif //__BASE_CACHE_HH__ diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh index bd88849de..ba424d128 100644 --- a/src/mem/cache/cache.hh +++ b/src/mem/cache/cache.hh @@ -45,11 +45,10 @@ #include "mem/cache/base_cache.hh" #include "mem/cache/cache_blk.hh" #include "mem/cache/miss/miss_buffer.hh" -#include "mem/cache/prefetch/prefetcher.hh" //Forward decleration class MSHR; - +class BasePrefetcher; /** * A template-policy based cache. The behavior of the cache can be altered by @@ -119,7 +118,7 @@ class Cache : public BaseCache Coherence *coherence; /** Prefetcher */ - Prefetcher *prefetcher; + BasePrefetcher *prefetcher; /** * The clock ratio of the outgoing bus. @@ -304,7 +303,7 @@ class Cache : public BaseCache MissBuffer *missQueue; Coherence *coherence; BaseCache::Params baseParams; - Prefetcher *prefetcher; + BasePrefetcher*prefetcher; bool prefetchAccess; int hitLatency; CompressionAlgorithm *compressionAlg; @@ -319,7 +318,7 @@ class Cache : public BaseCache Params(TagStore *_tags, MissBuffer *mq, Coherence *coh, BaseCache::Params params, - Prefetcher *_prefetcher, + BasePrefetcher *_prefetcher, bool prefetch_access, int hit_latency, bool do_fast_writes, bool store_compressed, bool adaptive_compression, @@ -450,6 +449,14 @@ class Cache : public BaseCache * @return The estimated completion time. 
*/ Tick snoopProbe(PacketPtr &pkt); + + bool inCache(Addr addr) { + return (tags->findBlock(addr) != 0); + } + + bool inMissQueue(Addr addr) { + return (missQueue->findMSHR(addr) != 0); + } }; #endif // __CACHE_HH__ diff --git a/src/mem/cache/cache_builder.cc b/src/mem/cache/cache_builder.cc index e212650f2..318b57d50 100644 --- a/src/mem/cache/cache_builder.cc +++ b/src/mem/cache/cache_builder.cc @@ -197,7 +197,7 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache) #define BUILD_CACHE(TAGS, tags, c) \ do { \ - Prefetcher *pf; \ + BasePrefetcher *pf; \ if (pf_policy == "tagged") { \ BUILD_TAGGED_PREFETCHER(TAGS); \ } \ @@ -314,55 +314,55 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache) } while (0) #if defined(USE_TAGGED) -#define BUILD_TAGGED_PREFETCHER(t) pf = new \ - TaggedPrefetcher(prefetcher_size, \ - !prefetch_past_page, \ - prefetch_serial_squash, \ - prefetch_cache_check_push, \ - prefetch_data_accesses_only, \ - prefetch_latency, \ - prefetch_degree) +#define BUILD_TAGGED_PREFETCHER(t) \ + pf = new TaggedPrefetcher(prefetcher_size, \ + !prefetch_past_page, \ + prefetch_serial_squash, \ + prefetch_cache_check_push, \ + prefetch_data_accesses_only, \ + prefetch_latency, \ + prefetch_degree) #else #define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher") #endif #if defined(USE_STRIDED) -#define BUILD_STRIDED_PREFETCHER(t) pf = new \ - StridePrefetcher(prefetcher_size, \ - !prefetch_past_page, \ - prefetch_serial_squash, \ - prefetch_cache_check_push, \ - prefetch_data_accesses_only, \ - prefetch_latency, \ - prefetch_degree, \ - prefetch_use_cpu_id) +#define BUILD_STRIDED_PREFETCHER(t) \ + pf = new StridePrefetcher(prefetcher_size, \ + !prefetch_past_page, \ + prefetch_serial_squash, \ + prefetch_cache_check_push, \ + prefetch_data_accesses_only, \ + prefetch_latency, \ + prefetch_degree, \ + prefetch_use_cpu_id) #else #define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher") #endif #if defined(USE_GHB) -#define BUILD_GHB_PREFETCHER(t) pf = new \ - GHBPrefetcher(prefetcher_size, \ - !prefetch_past_page, \ - prefetch_serial_squash, \ - prefetch_cache_check_push, \ - prefetch_data_accesses_only, \ - prefetch_latency, \ - prefetch_degree, \ - prefetch_use_cpu_id) +#define BUILD_GHB_PREFETCHER(t) \ + pf = new GHBPrefetcher(prefetcher_size, \ + !prefetch_past_page, \ + prefetch_serial_squash, \ + prefetch_cache_check_push, \ + prefetch_data_accesses_only, \ + prefetch_latency, \ + prefetch_degree, \ + prefetch_use_cpu_id) #else #define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher") #endif #if defined(USE_TAGGED) -#define BUILD_NULL_PREFETCHER(t) pf = new \ - TaggedPrefetcher(prefetcher_size, \ - !prefetch_past_page, \ - prefetch_serial_squash, \ - prefetch_cache_check_push, \ - prefetch_data_accesses_only, \ - prefetch_latency, \ - prefetch_degree) +#define BUILD_NULL_PREFETCHER(t) \ + pf = new TaggedPrefetcher(prefetcher_size, \ + !prefetch_past_page, \ + prefetch_serial_squash, \ + prefetch_cache_check_push, \ + prefetch_data_accesses_only, \ + prefetch_latency, \ + prefetch_degree) #else #define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)") #endif diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 1d78b03c7..2333e4a0e 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -49,7 +49,7 @@ #include "mem/cache/cache.hh" #include "mem/cache/cache_blk.hh" #include "mem/cache/miss/mshr.hh" -#include "mem/cache/prefetch/prefetcher.hh" +#include "mem/cache/prefetch/base_prefetcher.hh" 
#include "sim/sim_exit.hh" // for SimExitEvent @@ -88,8 +88,6 @@ Cache(const std::string &_name, missQueue->setPrefetcher(prefetcher); coherence->setCache(this); prefetcher->setCache(this); - prefetcher->setTags(tags); - prefetcher->setBuffer(missQueue); invalidateReq = new Request((Addr) NULL, blkSize, 0); invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0); } diff --git a/src/mem/cache/prefetch/base_prefetcher.cc b/src/mem/cache/prefetch/base_prefetcher.cc index a1388fad6..4254800b1 100644 --- a/src/mem/cache/prefetch/base_prefetcher.cc +++ b/src/mem/cache/prefetch/base_prefetcher.cc @@ -102,6 +102,26 @@ BasePrefetcher::regStats(const std::string &name) ; } +inline bool +BasePrefetcher::inCache(Addr addr) +{ + if (cache->inCache(addr)) { + pfCacheHit++; + return true; + } + return false; +} + +inline bool +BasePrefetcher::inMissQueue(Addr addr) +{ + if (cache->inMissQueue(addr)) { + pfMSHRHit++; + return true; + } + return false; +} + PacketPtr BasePrefetcher::getPacket() { @@ -118,7 +138,7 @@ BasePrefetcher::getPacket() pkt = *pf.begin(); pf.pop_front(); if (!cacheCheckPush) { - keepTrying = inCache(pkt); + keepTrying = cache->inCache(pkt->getAddr()); } if (pf.empty()) { cache->clearMasterRequest(Request_PF); @@ -190,7 +210,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time) //Check if it is already in the cache if (cacheCheckPush) { - if (inCache(prefetch)) { + if (cache->inCache(prefetch->getAddr())) { addr++; delay++; continue; @@ -198,7 +218,7 @@ BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time) } //Check if it is already in the miss_queue - if (inMissQueue(prefetch->getAddr())) { + if (cache->inMissQueue(prefetch->getAddr())) { addr++; delay++; continue; diff --git a/src/mem/cache/prefetch/base_prefetcher.hh b/src/mem/cache/prefetch/base_prefetcher.hh index 781d3ab09..2780f5e5a 100644 --- a/src/mem/cache/prefetch/base_prefetcher.hh +++ b/src/mem/cache/prefetch/base_prefetcher.hh @@ -36,10 +36,13 @@ #ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__ -#include "mem/packet.hh" #include +#include "base/statistics.hh" +#include "mem/packet.hh" + class BaseCache; + class BasePrefetcher { protected: @@ -95,6 +98,10 @@ class BasePrefetcher void handleMiss(PacketPtr &pkt, Tick time); + bool inCache(Addr addr); + + bool inMissQueue(Addr addr); + PacketPtr getPacket(); bool havePending() @@ -106,10 +113,6 @@ class BasePrefetcher std::list &addresses, std::list &delays) = 0; - virtual bool inCache(PacketPtr &pkt) = 0; - - virtual bool inMissQueue(Addr address) = 0; - std::list::iterator inPrefetch(Addr address); }; diff --git a/src/mem/cache/prefetch/ghb_prefetcher.cc b/src/mem/cache/prefetch/ghb_prefetcher.cc index 7d537641e..d7d819a2d 100644 --- a/src/mem/cache/prefetch/ghb_prefetcher.cc +++ b/src/mem/cache/prefetch/ghb_prefetcher.cc @@ -31,16 +31,44 @@ /** * @file - * GHB Prefetcher template instantiations. + * GHB Prefetcher implementation. 
*/ -#include "mem/cache/tags/lru.hh" - #include "mem/cache/prefetch/ghb_prefetcher.hh" +#include "arch/isa_traits.hh" + +void +GHBPrefetcher::calculatePrefetch(PacketPtr &pkt, std::list &addresses, + std::list &delays) +{ + Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1); + int cpuID = pkt->req->getCpuNum(); + if (!useCPUId) cpuID = 0; + -// Template Instantiations -#ifndef DOXYGEN_SHOULD_SKIP_THIS + int new_stride = blkAddr - last_miss_addr[cpuID]; + int old_stride = last_miss_addr[cpuID] - + second_last_miss_addr[cpuID]; -template class GHBPrefetcher; + second_last_miss_addr[cpuID] = last_miss_addr[cpuID]; + last_miss_addr[cpuID] = blkAddr; -#endif //DOXYGEN_SHOULD_SKIP_THIS + if (new_stride == old_stride) { + for (int d=1; d <= degree; d++) { + Addr newAddr = blkAddr + d * new_stride; + if (this->pageStop && + (blkAddr & ~(TheISA::VMPageSize - 1)) != + (newAddr & ~(TheISA::VMPageSize - 1))) + { + //Spanned the page, so now stop + this->pfSpanPage += degree - d + 1; + return; + } + else + { + addresses.push_back(newAddr); + delays.push_back(latency); + } + } + } +} diff --git a/src/mem/cache/prefetch/ghb_prefetcher.hh b/src/mem/cache/prefetch/ghb_prefetcher.hh index c558a3e64..f31b56dcf 100644 --- a/src/mem/cache/prefetch/ghb_prefetcher.hh +++ b/src/mem/cache/prefetch/ghb_prefetcher.hh @@ -30,31 +30,18 @@ /** * @file - * Describes a ghb prefetcher based on template policies. + * Describes a ghb prefetcher. */ #ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__ -#include "base/misc.hh" // fatal, panic, and warn +#include "mem/cache/prefetch/base_prefetcher.hh" -#include "mem/cache/prefetch/prefetcher.hh" - -/** - * A template-policy based cache. The behavior of the cache can be altered by - * supplying different template policies. TagStore handles all tag and data - * storage @sa TagStore. MissBuffer handles all misses and writes/writebacks - * @sa MissQueue. Coherence handles all coherence policy details @sa - * UniCoherence, SimpleMultiCoherence. 
- */ -template -class GHBPrefetcher : public Prefetcher +class GHBPrefetcher : public BasePrefetcher { protected: - MissBuffer* mq; - TagStore* tags; - Addr second_last_miss_addr[64/*MAX_CPUS*/]; Addr last_miss_addr[64/*MAX_CPUS*/]; @@ -67,48 +54,16 @@ class GHBPrefetcher : public Prefetcher GHBPrefetcher(int size, bool pageStop, bool serialSquash, bool cacheCheckPush, bool onlyData, Tick latency, int degree, bool useCPUId) - :Prefetcher(size, pageStop, serialSquash, - cacheCheckPush, onlyData), - latency(latency), degree(degree), useCPUId(useCPUId) + : BasePrefetcher(size, pageStop, serialSquash, + cacheCheckPush, onlyData), + latency(latency), degree(degree), useCPUId(useCPUId) { } ~GHBPrefetcher() {} void calculatePrefetch(PacketPtr &pkt, std::list &addresses, - std::list &delays) - { - Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1); - int cpuID = pkt->req->getCpuNum(); - if (!useCPUId) cpuID = 0; - - - int new_stride = blkAddr - last_miss_addr[cpuID]; - int old_stride = last_miss_addr[cpuID] - - second_last_miss_addr[cpuID]; - - second_last_miss_addr[cpuID] = last_miss_addr[cpuID]; - last_miss_addr[cpuID] = blkAddr; - - if (new_stride == old_stride) { - for (int d=1; d <= degree; d++) { - Addr newAddr = blkAddr + d * new_stride; - if (this->pageStop && - (blkAddr & ~(TheISA::VMPageSize - 1)) != - (newAddr & ~(TheISA::VMPageSize - 1))) - { - //Spanned the page, so now stop - this->pfSpanPage += degree - d + 1; - return; - } - else - { - addresses.push_back(newAddr); - delays.push_back(latency); - } - } - } - } + std::list &delays); }; #endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__ diff --git a/src/mem/cache/prefetch/stride_prefetcher.cc b/src/mem/cache/prefetch/stride_prefetcher.cc index 847f2979e..8d957182a 100644 --- a/src/mem/cache/prefetch/stride_prefetcher.cc +++ b/src/mem/cache/prefetch/stride_prefetcher.cc @@ -34,13 +34,59 @@ * Stride Prefetcher template instantiations. 
*/ -#include "mem/cache/tags/lru.hh" - #include "mem/cache/prefetch/stride_prefetcher.hh" -// Template Instantiations -#ifndef DOXYGEN_SHOULD_SKIP_THIS +void +StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list &addresses, + std::list &delays) +{ +// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1); + int cpuID = pkt->req->getCpuNum(); + if (!useCPUId) cpuID = 0; + + /* Scan Table for IAddr Match */ +/* std::list::iterator iter; + for (iter=table[cpuID].begin(); + iter !=table[cpuID].end(); + iter++) { + if ((*iter)->IAddr == pkt->pc) break; + } + + if (iter != table[cpuID].end()) { + //Hit in table + + int newStride = blkAddr - (*iter)->MAddr; + if (newStride == (*iter)->stride) { + (*iter)->confidence++; + } + else { + (*iter)->stride = newStride; + (*iter)->confidence--; + } + + (*iter)->MAddr = blkAddr; -template class StridePrefetcher; + for (int d=1; d <= degree; d++) { + Addr newAddr = blkAddr + d * newStride; + if (this->pageStop && + (blkAddr & ~(TheISA::VMPageSize - 1)) != + (newAddr & ~(TheISA::VMPageSize - 1))) + { + //Spanned the page, so now stop + this->pfSpanPage += degree - d + 1; + return; + } + else + { + addresses.push_back(newAddr); + delays.push_back(latency); + } + } + } + else { + //Miss in table + //Find lowest confidence and replace -#endif //DOXYGEN_SHOULD_SKIP_THIS + } +*/ +} diff --git a/src/mem/cache/prefetch/stride_prefetcher.hh b/src/mem/cache/prefetch/stride_prefetcher.hh index 57e430400..831e60fb4 100644 --- a/src/mem/cache/prefetch/stride_prefetcher.hh +++ b/src/mem/cache/prefetch/stride_prefetcher.hh @@ -30,31 +30,18 @@ /** * @file - * Describes a strided prefetcher based on template policies. + * Describes a strided prefetcher. */ #ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__ -#include "base/misc.hh" // fatal, panic, and warn +#include "mem/cache/prefetch/base_prefetcher.hh" -#include "mem/cache/prefetch/prefetcher.hh" - -/** - * A template-policy based cache. The behavior of the cache can be altered by - * supplying different template policies. TagStore handles all tag and data - * storage @sa TagStore. MissBuffer handles all misses and writes/writebacks - * @sa MissQueue. Coherence handles all coherence policy details @sa - * UniCoherence, SimpleMultiCoherence. 
- */ -template -class StridePrefetcher : public Prefetcher +class StridePrefetcher : public BasePrefetcher { protected: - MissBuffer* mq; - TagStore* tags; - class strideEntry { public: @@ -84,66 +71,16 @@ class StridePrefetcher : public Prefetcher StridePrefetcher(int size, bool pageStop, bool serialSquash, bool cacheCheckPush, bool onlyData, Tick latency, int degree, bool useCPUId) - :Prefetcher(size, pageStop, serialSquash, - cacheCheckPush, onlyData), - latency(latency), degree(degree), useCPUId(useCPUId) + : BasePrefetcher(size, pageStop, serialSquash, + cacheCheckPush, onlyData), + latency(latency), degree(degree), useCPUId(useCPUId) { } ~StridePrefetcher() {} void calculatePrefetch(PacketPtr &pkt, std::list &addresses, - std::list &delays) - { -// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1); - int cpuID = pkt->req->getCpuNum(); - if (!useCPUId) cpuID = 0; - - /* Scan Table for IAddr Match */ -/* std::list::iterator iter; - for (iter=table[cpuID].begin(); - iter !=table[cpuID].end(); - iter++) { - if ((*iter)->IAddr == pkt->pc) break; - } - - if (iter != table[cpuID].end()) { - //Hit in table - - int newStride = blkAddr - (*iter)->MAddr; - if (newStride == (*iter)->stride) { - (*iter)->confidence++; - } - else { - (*iter)->stride = newStride; - (*iter)->confidence--; - } - - (*iter)->MAddr = blkAddr; - - for (int d=1; d <= degree; d++) { - Addr newAddr = blkAddr + d * newStride; - if (this->pageStop && - (blkAddr & ~(TheISA::VMPageSize - 1)) != - (newAddr & ~(TheISA::VMPageSize - 1))) - { - //Spanned the page, so now stop - this->pfSpanPage += degree - d + 1; - return; - } - else - { - addresses.push_back(newAddr); - delays.push_back(latency); - } - } - } - else { - //Miss in table - //Find lowest confidence and replace - - } -*/ } + std::list &delays); }; #endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__ diff --git a/src/mem/cache/prefetch/tagged_prefetcher.cc b/src/mem/cache/prefetch/tagged_prefetcher.cc new file mode 100644 index 000000000..bc1fa46b9 --- /dev/null +++ b/src/mem/cache/prefetch/tagged_prefetcher.cc @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Ron Dreslinski + */ + +/** + * @file + * Describes a tagged prefetcher based on template policies. + */ + +#include "arch/isa_traits.hh" +#include "mem/cache/prefetch/tagged_prefetcher.hh" + +TaggedPrefetcher:: +TaggedPrefetcher(int size, bool pageStop, bool serialSquash, + bool cacheCheckPush, bool onlyData, + Tick latency, int degree) + : BasePrefetcher(size, pageStop, serialSquash, + cacheCheckPush, onlyData), + latency(latency), degree(degree) +{ +} + +void +TaggedPrefetcher:: +calculatePrefetch(PacketPtr &pkt, std::list &addresses, + std::list &delays) +{ + Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1); + + for (int d=1; d <= degree; d++) { + Addr newAddr = blkAddr + d*(this->blkSize); + if (this->pageStop && + (blkAddr & ~(TheISA::VMPageSize - 1)) != + (newAddr & ~(TheISA::VMPageSize - 1))) + { + //Spanned the page, so now stop + this->pfSpanPage += degree - d + 1; + return; + } + else + { + addresses.push_back(newAddr); + delays.push_back(latency); + } + } +} + + diff --git a/src/mem/cache/prefetch/tagged_prefetcher.hh b/src/mem/cache/prefetch/tagged_prefetcher.hh index dc2aaec50..b9d228aba 100644 --- a/src/mem/cache/prefetch/tagged_prefetcher.hh +++ b/src/mem/cache/prefetch/tagged_prefetcher.hh @@ -30,29 +30,18 @@ /** * @file - * Describes a tagged prefetcher based on template policies. + * Describes a tagged prefetcher. */ #ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__ #define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__ -#include "mem/cache/prefetch/prefetcher.hh" +#include "mem/cache/prefetch/base_prefetcher.hh" -/** - * A template-policy based cache. The behavior of the cache can be altered by - * supplying different template policies. TagStore handles all tag and data - * storage @sa TagStore. MissBuffer handles all misses and writes/writebacks - * @sa MissQueue. Coherence handles all coherence policy details @sa - * UniCoherence, SimpleMultiCoherence. - */ -template -class TaggedPrefetcher : public Prefetcher +class TaggedPrefetcher : public BasePrefetcher { protected: - MissBuffer* mq; - TagStore* tags; - Tick latency; int degree; diff --git a/src/mem/cache/prefetch/tagged_prefetcher_impl.hh b/src/mem/cache/prefetch/tagged_prefetcher_impl.hh deleted file mode 100644 index b3d4284c7..000000000 --- a/src/mem/cache/prefetch/tagged_prefetcher_impl.hh +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2005 The Regents of The University of Michigan - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Authors: Ron Dreslinski - */ - -/** - * @file - * Describes a tagged prefetcher based on template policies. - */ - -#include "arch/isa_traits.hh" -#include "mem/cache/prefetch/tagged_prefetcher.hh" - -template -TaggedPrefetcher:: -TaggedPrefetcher(int size, bool pageStop, bool serialSquash, - bool cacheCheckPush, bool onlyData, - Tick latency, int degree) - :Prefetcher(size, pageStop, serialSquash, - cacheCheckPush, onlyData), - latency(latency), degree(degree) -{ -} - -template -void -TaggedPrefetcher:: -calculatePrefetch(PacketPtr &pkt, std::list &addresses, - std::list &delays) -{ - Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1); - - for (int d=1; d <= degree; d++) { - Addr newAddr = blkAddr + d*(this->blkSize); - if (this->pageStop && - (blkAddr & ~(TheISA::VMPageSize - 1)) != - (newAddr & ~(TheISA::VMPageSize - 1))) - { - //Spanned the page, so now stop - this->pfSpanPage += degree - d + 1; - return; - } - else - { - addresses.push_back(newAddr); - delays.push_back(latency); - } - } -} - - -- cgit v1.2.3 From 9d7db8bb2bbd5ae6a3653385c896b19da4d33f69 Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Mon, 18 Dec 2006 23:07:52 -0800 Subject: Streamline Cache/Tags interface: get rid of redundant functions, don't regenerate address from block in cache so that tags can turn around and use address to look up block again. 
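In concrete terms, each tag store below trades its address-based invalidateBlk(Addr) for a version that takes the block pointer the caller already holds, so the cache no longer rebuilds an address via regenerateBlkAddr() only for the tags to look the same block up a second time. The minimal sketch below shows the before/after call shape; it is illustration only, not part of the patch, and TagStore, CacheBlk and invalidateBlkByAddr are simplified stand-ins for the BaseTags/CacheBlk variants and the removed invalidateBlk(Addr) in the diffs that follow.

    #include <cstdint>

    typedef uint64_t Addr;

    struct CacheBlk { Addr tag; int set; int status; };

    struct TagStore {
        CacheBlk *findBlock(Addr addr) { return 0; }   // set/tag lookup elided

        // Old shape: caller passes an address, tags re-find the block.
        void invalidateBlkByAddr(Addr addr) {
            if (CacheBlk *blk = findBlock(addr))
                blk->status = 0;
        }

        // New shape: caller hands back the block it already found.
        void invalidateBlk(CacheBlk *blk) {
            if (blk)
                blk->status = 0;
        }
    };

    // Cache side, before:  invalidateBlk(tags->regenerateBlkAddr(blk->tag, blk->set));
    // Cache side, after:   tags->invalidateBlk(blk);

The payoff is one less hash/set walk per invalidation and one less place that reconstructs block addresses outside the tag store.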
--HG-- extra : convert_revision : 171018aa6e331d98399c4e5ef24e173c95eaca28 --- src/mem/cache/cache.hh | 23 --------------- src/mem/cache/cache_impl.hh | 29 +++++++------------ src/mem/cache/tags/fa_lru.cc | 43 +--------------------------- src/mem/cache/tags/fa_lru.hh | 17 ++--------- src/mem/cache/tags/iic.cc | 62 +--------------------------------------- src/mem/cache/tags/iic.hh | 16 ++--------- src/mem/cache/tags/lru.cc | 24 +--------------- src/mem/cache/tags/lru.hh | 16 ++--------- src/mem/cache/tags/split.cc | 58 +++---------------------------------- src/mem/cache/tags/split.hh | 16 ++--------- src/mem/cache/tags/split_lifo.cc | 28 +----------------- src/mem/cache/tags/split_lifo.hh | 16 ++--------- src/mem/cache/tags/split_lru.cc | 24 +--------------- src/mem/cache/tags/split_lru.hh | 16 ++--------- 14 files changed, 38 insertions(+), 350 deletions(-) (limited to 'src') diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh index ba424d128..26dab2179 100644 --- a/src/mem/cache/cache.hh +++ b/src/mem/cache/cache.hh @@ -279,21 +279,6 @@ class Cache : public BaseCache */ PacketPtr writebackBlk(BlkType *blk); - BlkType* findBlock(Addr addr) - { - return tags->findBlock(addr); - } - - BlkType* findBlock(PacketPtr &pkt) - { - return tags->findBlock(pkt->getAddr()); - } - - void invalidateBlk(CacheBlk *blk) - { - tags->invalidateBlk(tags->regenerateBlkAddr(blk->tag, blk->set)); - } - public: class Params @@ -398,14 +383,6 @@ class Cache : public BaseCache void snoopResponse(PacketPtr &pkt); - /** - * Invalidates the block containing address if found. - * @param addr The address to look for. - * @param asid The address space ID of the address. - * @todo Is this function necessary? - */ - void invalidateBlk(Addr addr); - /** * Squash all requests associated with specified thread. * intended for use by I-cache. 
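The cache.hh hunks above drop the cache's private findBlock() wrappers and its invalidateBlk(Addr); in cache_impl.hh next, every call site goes straight to the tag store's single address-based findBlock(). A hedged sketch of that consolidated lookup path follows; access(), Packet, Tags and Blk are simplified stand-ins, while getAddr(), findBlock() and lat mirror the names in the diffs.

    #include <cstdint>

    typedef uint64_t Addr;

    struct Packet { Addr addr; Addr getAddr() const { return addr; } };
    struct Blk { int status; };

    struct Tags {
        // Lookup that updates replacement data and reports the hit latency.
        Blk *findBlock(Addr addr, int &lat) { lat = 1; return 0; }   // lookup elided
        // Probe-only lookup, no replacement update.
        Blk *findBlock(Addr addr) { int lat; return findBlock(addr, lat); }
    };

    Blk *access(Tags &tags, Packet *pkt, int &lat, bool update)
    {
        // Before: blk = tags->findBlock(pkt, lat), a separate PacketPtr overload
        // in every tag store.  After: one overload keyed by the packet's address.
        if (update)
            return tags.findBlock(pkt->getAddr(), lat);
        lat = 0;
        return tags.findBlock(pkt->getAddr());
    }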
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 2333e4a0e..9c41983fc 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -113,7 +113,7 @@ Cache::handleAccess(PacketPtr &pkt, int & lat, BlkType *blk = NULL; if (update) { - blk = tags->findBlock(pkt, lat); + blk = tags->findBlock(pkt->getAddr(), lat); } else { blk = tags->findBlock(pkt->getAddr()); lat = 0; @@ -221,7 +221,7 @@ Cache::handleFill(BlkType *blk, PacketPtr &pkt, PacketPtr target) { #ifndef NDEBUG - BlkType *tmp_blk = findBlock(pkt->getAddr()); + BlkType *tmp_blk = tags->findBlock(pkt->getAddr()); assert(tmp_blk == blk); #endif blk = doReplacement(blk, pkt, new_state, writebacks); @@ -239,7 +239,7 @@ Cache::handleFill(BlkType *blk, PacketPtr &pkt, target->flags |= SATISFIED; if (target->cmd == Packet::InvalidateReq) { - invalidateBlk(blk); + tags->invalidateBlk(blk); blk = NULL; } @@ -318,7 +318,7 @@ Cache::handleFill(BlkType *blk, MSHR * mshr, if (target->cmd == Packet::InvalidateReq) { //Mark the blk as invalid now, if it hasn't been already if (blk) { - invalidateBlk(blk); + tags->invalidateBlk(blk); blk = NULL; } @@ -396,7 +396,7 @@ Cache::handleSnoop(BlkType *blk, { if (blk && blk->status != new_state) { if ((new_state && BlkValid) == 0) { - invalidateBlk(blk); + tags->invalidateBlk(blk); } else { assert(new_state >= 0 && new_state < 128); blk->status = new_state; @@ -628,7 +628,7 @@ Cache::getPacket() if (!pkt->req->isUncacheable()) { if (pkt->cmd == Packet::HardPFReq) misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++; - BlkType *blk = findBlock(pkt); + BlkType *blk = tags->findBlock(pkt->getAddr()); Packet::Command cmd = coherence->getBusCmd(pkt->cmd, (blk)? blk->status : 0); missQueue->setBusCmd(pkt, cmd); @@ -657,7 +657,7 @@ Cache::sendResult(PacketPtr &pkt, MSHR* mshr, if (upgrade) { assert(pkt); //Upgrades need to be fixed pkt->flags &= ~CACHE_LINE_FILL; - BlkType *blk = findBlock(pkt); + BlkType *blk = tags->findBlock(pkt->getAddr()); CacheBlk::State old_state = (blk) ? blk->status : 0; CacheBlk::State new_state = coherence->getNewState(pkt,old_state); if (old_state != new_state) @@ -708,7 +708,7 @@ Cache::handleResponse(PacketPtr &pkt) if (pkt->isCacheFill() && !pkt->isNoAllocate()) { DPRINTF(Cache, "Block for addr %x being updated in Cache\n", pkt->getAddr()); - blk = findBlock(pkt); + blk = tags->findBlock(pkt->getAddr()); CacheBlk::State old_state = (blk) ? blk->status : 0; PacketList writebacks; CacheBlk::State new_state = coherence->getNewState(pkt,old_state); @@ -765,7 +765,7 @@ Cache::snoop(PacketPtr &pkt) } Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1)); - BlkType *blk = findBlock(pkt); + BlkType *blk = tags->findBlock(pkt->getAddr()); MSHR *mshr = missQueue->findMSHR(blk_addr); if (coherence->hasProtocol() || pkt->isInvalidate()) { //@todo Move this into handle bus req @@ -898,13 +898,6 @@ Cache::snoopResponse(PacketPtr &pkt) } } -template -void -Cache::invalidateBlk(Addr addr) -{ - tags->invalidateBlk(addr); -} - /** * @todo Fix to not assume write allocate @@ -990,7 +983,7 @@ Cache::probe(PacketPtr &pkt, bool update, if (!pkt->req->isUncacheable() /*Uncacheables just go through*/ && (pkt->cmd != Packet::Writeback)/*Writebacks on miss fall through*/) { // Fetch the cache block to fill - BlkType *blk = findBlock(pkt); + BlkType *blk = tags->findBlock(pkt->getAddr()); Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd, (blk)? 
blk->status : 0); @@ -1072,7 +1065,7 @@ Cache::snoopProbe(PacketPtr &pkt) } Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1)); - BlkType *blk = findBlock(pkt); + BlkType *blk = tags->findBlock(pkt->getAddr()); MSHR *mshr = missQueue->findMSHR(blk_addr); CacheBlk::State new_state = 0; bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state); diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc index a58ddaff8..42a1fe34f 100644 --- a/src/mem/cache/tags/fa_lru.cc +++ b/src/mem/cache/tags/fa_lru.cc @@ -153,12 +153,9 @@ FALRU::probe(Addr addr) const } void -FALRU::invalidateBlk(Addr addr) +FALRU::invalidateBlk(FALRU::BlkType *blk) { - Addr blkAddr = blkAlign(addr); - FALRUBlk* blk = (*tagHash.find(blkAddr)).second; if (blk) { - assert(blk->tag == blkAddr); blk->status = 0; blk->isTouched = false; tagsInUse--; @@ -202,44 +199,6 @@ FALRU::findBlock(Addr addr, int &lat, int *inCache) return blk; } -FALRUBlk* -FALRU::findBlock(PacketPtr &pkt, int &lat, int *inCache) -{ - Addr addr = pkt->getAddr(); - - accesses++; - int tmp_in_cache = 0; - Addr blkAddr = blkAlign(addr); - FALRUBlk* blk = hashLookup(blkAddr); - - if (blk && blk->isValid()) { - assert(blk->tag == blkAddr); - tmp_in_cache = blk->inCache; - for (int i = 0; i < numCaches; i++) { - if (1<inCache) { - hits[i]++; - } else { - misses[i]++; - } - } - hits[numCaches]++; - if (blk != head){ - moveToHead(blk); - } - } else { - blk = NULL; - for (int i = 0; i < numCaches+1; ++i) { - misses[i]++; - } - } - if (inCache) { - *inCache = tmp_in_cache; - } - - lat = hitLatency; - //assert(check()); - return blk; -} FALRUBlk* FALRU::findBlock(Addr addr) const diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh index 2db89d603..dabbda740 100644 --- a/src/mem/cache/tags/fa_lru.hh +++ b/src/mem/cache/tags/fa_lru.hh @@ -173,11 +173,10 @@ public: bool probe(Addr addr) const; /** - * Invalidate the cache block that contains the given addr. - * @param asid The address space ID. - * @param addr The address to invalidate. + * Invalidate a cache block. + * @param blk The block to invalidate. */ - void invalidateBlk(Addr addr); + void invalidateBlk(BlkType *blk); /** * Find the block in the cache and update the replacement data. Returns @@ -190,16 +189,6 @@ public: */ FALRUBlk* findBlock(Addr addr, int &lat, int *inCache = 0); - /** - * Find the block in the cache and update the replacement data. Returns - * the access latency and the in cache flags as a side effect - * @param pkt The req whose block to find - * @param lat The latency of the access. - * @param inCache The FALRUBlk::inCache flags. - * @return Pointer to the cache block. - */ - FALRUBlk* findBlock(PacketPtr &pkt, int &lat, int *inCache = 0); - /** * Find the block in the cache, do not update the replacement data. * @param addr The address to look for. 
diff --git a/src/mem/cache/tags/iic.cc b/src/mem/cache/tags/iic.cc index f4e870659..38f9662ea 100644 --- a/src/mem/cache/tags/iic.cc +++ b/src/mem/cache/tags/iic.cc @@ -284,65 +284,6 @@ IIC::findBlock(Addr addr, int &lat) return tag_ptr; } -IICTag* -IIC::findBlock(PacketPtr &pkt, int &lat) -{ - Addr addr = pkt->getAddr(); - - Addr tag = extractTag(addr); - unsigned set = hash(addr); - int set_lat; - - unsigned long chain_ptr; - - if (PROFILE_IIC) - setAccess.sample(set); - - IICTag *tag_ptr = sets[set].findTag(tag, chain_ptr); - set_lat = 1; - if (tag_ptr == NULL && chain_ptr != tagNull) { - int secondary_depth; - tag_ptr = secondaryChain(tag, chain_ptr, &secondary_depth); - set_lat += secondary_depth; - // set depth for statistics fix this later!!! egh - sets[set].depth = set_lat; - - if (tag_ptr != NULL) { - /* need to move tag into primary table */ - // need to preserve chain: fix this egh - sets[set].tags[assoc-1]->chain_ptr = tag_ptr->chain_ptr; - tagSwap(tag_ptr - tagStore, sets[set].tags[assoc-1] - tagStore); - tag_ptr = sets[set].findTag(tag, chain_ptr); - assert(tag_ptr!=NULL); - } - - } - set_lat = set_lat * hashDelay + hitLatency; - if (tag_ptr != NULL) { - // IIC replacement: if this is not the first element of - // list, reorder - sets[set].moveToHead(tag_ptr); - - hitHashDepth.sample(sets[set].depth); - hashHit++; - hitDepthTotal += sets[set].depth; - tag_ptr->status |= BlkReferenced; - lat = set_lat; - if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) { - lat = tag_ptr->whenReady - curTick; - } - - tag_ptr->refCount += 1; - } - else { - // fall through: cache block not found, not a hit... - missHashDepth.sample(sets[set].depth); - hashMiss++; - missDepthTotal += sets[set].depth; - lat = set_lat; - } - return tag_ptr; -} IICTag* IIC::findBlock(Addr addr) const @@ -695,9 +636,8 @@ IIC::compressBlock(unsigned long index) } void -IIC::invalidateBlk(Addr addr) +IIC::invalidateBlk(IIC::BlkType *tag_ptr) { - IICTag* tag_ptr = findBlock(addr); if (tag_ptr) { for (int i = 0; i < tag_ptr->numData; ++i) { dataReferenceCount[tag_ptr->data_ptr[i]]--; diff --git a/src/mem/cache/tags/iic.hh b/src/mem/cache/tags/iic.hh index 92bd6da1d..d0663d330 100644 --- a/src/mem/cache/tags/iic.hh +++ b/src/mem/cache/tags/iic.hh @@ -435,11 +435,10 @@ class IIC : public BaseTags void compressBlock(unsigned long index); /** - * Invalidate the block containing the address. - * @param asid The address space ID. - * @param addr The address to invalidate. + * Invalidate a block. + * @param blk The block to invalidate. */ - void invalidateBlk(Addr addr); + void invalidateBlk(BlkType *blk); /** * Find the block and update the replacement data. This call also returns @@ -451,15 +450,6 @@ class IIC : public BaseTags */ IICTag* findBlock(Addr addr, int &lat); - /** - * Find the block and update the replacement data. This call also returns - * the access latency as a side effect. - * @param pkt The req whose block to find - * @param lat The access latency. - * @return A pointer to the block found, if any. - */ - IICTag* findBlock(PacketPtr &pkt, int &lat); - /** * Find the block, do not update the replacement data. * @param addr The address to find. 
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc index 31d29aae6..102bb3506 100644 --- a/src/mem/cache/tags/lru.cc +++ b/src/mem/cache/tags/lru.cc @@ -183,27 +183,6 @@ LRU::findBlock(Addr addr, int &lat) return blk; } -LRUBlk* -LRU::findBlock(PacketPtr &pkt, int &lat) -{ - Addr addr = pkt->getAddr(); - - Addr tag = extractTag(addr); - unsigned set = extractSet(addr); - LRUBlk *blk = sets[set].findBlk(tag); - lat = hitLatency; - if (blk != NULL) { - // move this block to head of the MRU list - sets[set].moveToHead(blk); - if (blk->whenReady > curTick - && blk->whenReady - curTick > hitLatency) { - lat = blk->whenReady - curTick; - } - blk->refCount += 1; - } - - return blk; -} LRUBlk* LRU::findBlock(Addr addr) const @@ -240,9 +219,8 @@ LRU::findReplacement(PacketPtr &pkt, PacketList &writebacks, } void -LRU::invalidateBlk(Addr addr) +LRU::invalidateBlk(LRU::BlkType *blk) { - LRUBlk *blk = findBlock(addr); if (blk) { blk->status = 0; blk->isTouched = false; diff --git a/src/mem/cache/tags/lru.hh b/src/mem/cache/tags/lru.hh index fed688283..4b94adca6 100644 --- a/src/mem/cache/tags/lru.hh +++ b/src/mem/cache/tags/lru.hh @@ -161,20 +161,10 @@ public: bool probe(Addr addr) const; /** - * Invalidate the block containing the given address. - * @param asid The address space ID. - * @param addr The address to invalidate. - */ - void invalidateBlk(Addr addr); - - /** - * Finds the given address in the cache and update replacement data. - * Returns the access latency as a side effect. - * @param pkt The request whose block to find. - * @param lat The access latency. - * @return Pointer to the cache block if found. + * Invalidate the given block. + * @param blk The block to invalidate. */ - LRUBlk* findBlock(PacketPtr &pkt, int &lat); + void invalidateBlk(BlkType *blk); /** * Finds the given address in the cache and update replacement data. 
diff --git a/src/mem/cache/tags/split.cc b/src/mem/cache/tags/split.cc index bc74f0e0f..5ac87eaba 100644 --- a/src/mem/cache/tags/split.cc +++ b/src/mem/cache/tags/split.cc @@ -266,58 +266,6 @@ Split::probe(Addr addr) const return success; } -SplitBlk* -Split::findBlock(PacketPtr &pkt, int &lat) -{ - - Addr aligned = blkAlign(pkt->getAddr()); - - if (memHash.count(aligned)) { - memHash[aligned]++; - } else if (pkt->nic_pkt()) { - memHash[aligned] = 1; - } - - SplitBlk *blk = lru->findBlock(pkt->getAddr(), lat); - if (blk) { - if (pkt->nic_pkt()) { - NR_CP_hits++; - } else { - CR_CP_hits++; - } - } else { - if (lifo && lifo_net) { - blk = lifo_net->findBlock(pkt->getAddr(), lat); - - } else if (lru_net) { - blk = lru_net->findBlock(pkt->getAddr(), lat); - } - if (blk) { - if (pkt->nic_pkt()) { - NR_NP_hits++; - } else { - CR_NP_hits++; - } - } - } - - if (blk) { - Tick latency = curTick - blk->ts; - if (blk->isNIC) { - if (!blk->isUsed && !pkt->nic_pkt()) { - useByCPUCycleDist.sample(latency); - nicUseByCPUCycleTotal += latency; - nicBlksUsedByCPU++; - } - } - blk->isUsed = true; - - if (pkt->nic_pkt()) { - DPRINTF(Split, "found block in partition %d\n", blk->part); - } - } - return blk; -} SplitBlk* Split::findBlock(Addr addr, int &lat) @@ -403,14 +351,16 @@ Split::findReplacement(PacketPtr &pkt, PacketList &writebacks, } void -Split::invalidateBlk(Addr addr) +Split::invalidateBlk(Split::BlkType *blk) { - SplitBlk *blk = lru->findBlock(addr); if (!blk) { + fatal("FIXME!\n"); +#if 0 if (lifo && lifo_net) blk = lifo_net->findBlock(addr); else if (lru_net) blk = lru_net->findBlock(addr); +#endif if (!blk) return; diff --git a/src/mem/cache/tags/split.hh b/src/mem/cache/tags/split.hh index 898d3c7a0..e6ace0921 100644 --- a/src/mem/cache/tags/split.hh +++ b/src/mem/cache/tags/split.hh @@ -184,11 +184,10 @@ class Split : public BaseTags bool probe(Addr addr) const; /** - * Invalidate the block containing the given address. - * @param asid The address space ID. - * @param addr The address to invalidate. + * Invalidate the given block. + * @param blk The block to invalidate. */ - void invalidateBlk(Addr addr); + void invalidateBlk(BlkType *blk); /** * Finds the given address in the cache and update replacement data. @@ -200,15 +199,6 @@ class Split : public BaseTags */ SplitBlk* findBlock(Addr addr, int &lat); - /** - * Finds the given address in the cache and update replacement data. - * Returns the access latency as a side effect. - * @param pkt The memory request whose block to find - * @param lat The access latency. - * @return Pointer to the cache block if found. - */ - SplitBlk* findBlock(PacketPtr &pkt, int &lat); - /** * Finds the given address in the cache, do not update replacement data. * @param addr The address to find. 
diff --git a/src/mem/cache/tags/split_lifo.cc b/src/mem/cache/tags/split_lifo.cc index 302e2aaeb..792ff8fa7 100644 --- a/src/mem/cache/tags/split_lifo.cc +++ b/src/mem/cache/tags/split_lifo.cc @@ -254,31 +254,6 @@ SplitLIFO::findBlock(Addr addr, int &lat) return blk; } -SplitBlk* -SplitLIFO::findBlock(PacketPtr &pkt, int &lat) -{ - Addr addr = pkt->getAddr(); - - Addr tag = extractTag(addr); - unsigned set = extractSet(addr); - SplitBlk *blk = sets[set].findBlk(tag); - - if (blk) { - DPRINTF(Split, "Found LIFO blk %#x in set %d, with tag %#x\n", - addr, set, tag); - hits++; - - if (twoQueue) { - blk->isUsed = true; - sets[set].moveToFirstIn(blk); - } else { - sets[set].moveToLastIn(blk); - } - } - lat = hitLatency; - - return blk; -} SplitBlk* SplitLIFO::findBlock(Addr addr) const @@ -335,9 +310,8 @@ SplitLIFO::findReplacement(PacketPtr &pkt, PacketList &writebacks, } void -SplitLIFO::invalidateBlk(Addr addr) +SplitLIFO::invalidateBlk(SplitLIFO::BlkType *blk) { - SplitBlk *blk = findBlock(addr); if (blk) { blk->status = 0; blk->isTouched = false; diff --git a/src/mem/cache/tags/split_lifo.hh b/src/mem/cache/tags/split_lifo.hh index 6c3befe37..9001cdb14 100644 --- a/src/mem/cache/tags/split_lifo.hh +++ b/src/mem/cache/tags/split_lifo.hh @@ -184,11 +184,10 @@ public: bool probe( Addr addr) const; /** - * Invalidate the block containing the given address. - * @param asid The address space ID. - * @param addr The address to invalidate. + * Invalidate the given block. + * @param blk The block to invalidate. */ - void invalidateBlk(Addr addr); + void invalidateBlk(BlkType *blk); /** * Finds the given address in the cache and update replacement data. @@ -200,15 +199,6 @@ public: */ SplitBlk* findBlock(Addr addr, int &lat); - /** - * Finds the given address in the cache and update replacement data. - * Returns the access latency as a side effect. - * @param pkt The req whose block to find - * @param lat The access latency. - * @return Pointer to the cache block if found. - */ - SplitBlk* findBlock(PacketPtr &pkt, int &lat); - /** * Finds the given address in the cache, do not update replacement data. * @param addr The address to find. diff --git a/src/mem/cache/tags/split_lru.cc b/src/mem/cache/tags/split_lru.cc index 11c9a5d64..c37d72cb7 100644 --- a/src/mem/cache/tags/split_lru.cc +++ b/src/mem/cache/tags/split_lru.cc @@ -202,27 +202,6 @@ SplitLRU::findBlock(Addr addr, int &lat) return blk; } -SplitBlk* -SplitLRU::findBlock(PacketPtr &pkt, int &lat) -{ - Addr addr = pkt->getAddr(); - - Addr tag = extractTag(addr); - unsigned set = extractSet(addr); - SplitBlk *blk = sets[set].findBlk(tag); - lat = hitLatency; - if (blk != NULL) { - // move this block to head of the MRU list - sets[set].moveToHead(blk); - if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency){ - lat = blk->whenReady - curTick; - } - blk->refCount += 1; - hits++; - } - - return blk; -} SplitBlk* SplitLRU::findBlock(Addr addr) const @@ -261,9 +240,8 @@ SplitLRU::findReplacement(PacketPtr &pkt, PacketList &writebacks, } void -SplitLRU::invalidateBlk(Addr addr) +SplitLRU::invalidateBlk(SplitLRU::BlkType *blk) { - SplitBlk *blk = findBlock(addr); if (blk) { blk->status = 0; blk->isTouched = false; diff --git a/src/mem/cache/tags/split_lru.hh b/src/mem/cache/tags/split_lru.hh index 6160d59e5..e17a478d3 100644 --- a/src/mem/cache/tags/split_lru.hh +++ b/src/mem/cache/tags/split_lru.hh @@ -167,11 +167,10 @@ public: bool probe(Addr addr) const; /** - * Invalidate the block containing the given address. 
- * @param asid The address space ID. - * @param addr The address to invalidate. + * Invalidate the given block. + * @param blk The block to invalidate. */ - void invalidateBlk(Addr addr); + void invalidateBlk(BlkType *blk); /** * Finds the given address in the cache and update replacement data. @@ -183,15 +182,6 @@ public: */ SplitBlk* findBlock(Addr addr, int &lat); - /** - * Finds the given address in the cache and update replacement data. - * Returns the access latency as a side effect. - * @param pkt The req whose block to find. - * @param lat The access latency. - * @return Pointer to the cache block if found. - */ - SplitBlk* findBlock(PacketPtr &pkt, int &lat); - /** * Finds the given address in the cache, do not update replacement data. * @param addr The address to find. -- cgit v1.2.3 From 5e9d8795f2a2642843cbb73b2637adb97935521d Mon Sep 17 00:00:00 2001 From: Ali Saidi Date: Tue, 19 Dec 2006 02:11:33 -0500 Subject: fix twinx loads a little bit bugfixes and demap implementation in tlb ignore some more differencs for one cycle src/arch/sparc/isa/formats/mem/blockmem.isa: twinx has 2 micro-ops src/arch/sparc/isa/formats/mem/util.isa: fix the fault check for twinx src/arch/sparc/tlb.cc: tlb bugfixes and write demapping code src/cpu/exetrace.cc: don't halt on a couple more instruction (ldx, stx) when things differ beacuse of the way tlb faults are handled in legion. --HG-- extra : convert_revision : 1e156dead6ebd58b257213625ed63c3793ef4b71 --- src/arch/sparc/isa/formats/mem/blockmem.isa | 2 +- src/arch/sparc/isa/formats/mem/util.isa | 4 +- src/arch/sparc/tlb.cc | 87 ++++++++++++++++++++++++++++- src/cpu/exetrace.cc | 6 +- 4 files changed, 92 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/arch/sparc/isa/formats/mem/blockmem.isa b/src/arch/sparc/isa/formats/mem/blockmem.isa index 8bbbdc1da..5d05dad03 100644 --- a/src/arch/sparc/isa/formats/mem/blockmem.isa +++ b/src/arch/sparc/isa/formats/mem/blockmem.isa @@ -101,7 +101,7 @@ output header {{ // We make the assumption that all block memory operations // Will take 8 instructions to execute TwinMem(const char *mnem, ExtMachInst _machInst) : - SparcMacroInst(mnem, _machInst, No_OpClass, 8) + SparcMacroInst(mnem, _machInst, No_OpClass, 2) {} }; diff --git a/src/arch/sparc/isa/formats/mem/util.isa b/src/arch/sparc/isa/formats/mem/util.isa index 3f9146c21..b6e0945b7 100644 --- a/src/arch/sparc/isa/formats/mem/util.isa +++ b/src/arch/sparc/isa/formats/mem/util.isa @@ -285,9 +285,9 @@ let {{ fault = new MemAddressNotAligned; ''' TwinAlignmentFaultCheck = ''' - if(RD & 0xe) + if(RD & 0x1) fault = new IllegalInstruction; - else if(EA & 0x1f) + else if(EA & 0xf) fault = new MemAddressNotAligned; ''' # XXX Need to take care of pstate.hpriv as well. 
The lower ASIs diff --git a/src/arch/sparc/tlb.cc b/src/arch/sparc/tlb.cc index 1eb3aa53b..327ab659b 100644 --- a/src/arch/sparc/tlb.cc +++ b/src/arch/sparc/tlb.cc @@ -124,7 +124,8 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real, lookupTable.erase(i); } - lookupTable.insert(new_entry->range, new_entry);; + i = lookupTable.insert(new_entry->range, new_entry); + assert(i != lookupTable.end()); // If all entries have there used bit set, clear it on them all, but the // one we just inserted @@ -148,7 +149,7 @@ TLB::lookup(Addr va, int partition_id, bool real, int context_id) va, partition_id, context_id, real); // Assemble full address structure tr.va = va; - tr.size = va + MachineBytes; + tr.size = MachineBytes; tr.contextId = context_id; tr.partitionId = partition_id; tr.real = real; @@ -180,6 +181,7 @@ TLB::lookup(Addr va, int partition_id, bool real, int context_id) void TLB::dumpAll() { + MapIter i; for (int x = 0; x < size; x++) { if (tlb[x].valid) { DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n", @@ -196,11 +198,14 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id) TlbRange tr; MapIter i; + DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n", + va, partition_id, context_id, real); + cacheValid = false; // Assemble full address structure tr.va = va; - tr.size = va + MachineBytes; + tr.size = MachineBytes; tr.contextId = context_id; tr.partitionId = partition_id; tr.real = real; @@ -208,6 +213,7 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id) // Demap any entry that conflicts i = lookupTable.find(tr); if (i != lookupTable.end()) { + DPRINTF(IPR, "TLB: Demapped page\n"); i->second->valid = false; if (i->second->used) { i->second->used = false; @@ -221,6 +227,8 @@ void TLB::demapContext(int partition_id, int context_id) { int x; + DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n", + partition_id, context_id); cacheValid = false; for (x = 0; x < size; x++) { if (tlb[x].range.contextId == context_id && @@ -239,6 +247,7 @@ void TLB::demapAll(int partition_id) { int x; + DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id); cacheValid = false; for (x = 0; x < size; x++) { if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) { @@ -884,6 +893,9 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt) int part_insert; int entry_insert = -1; bool real_insert; + bool ignore; + int part_id; + int ctx_id; PageTableEntry pte; DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n", @@ -1003,6 +1015,41 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt) PageTableEntry::sun4u); insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert); break; + case ASI_IMMU_DEMAP: + ignore = false; + ctx_id = -1; + part_id = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID); + switch (bits(va,5,4)) { + case 0: + ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT); + break; + case 1: + ignore = true; + break; + case 3: + ctx_id = 0; + break; + default: + ignore = true; + } + + switch(bits(va,7,6)) { + case 0: // demap page + if (!ignore) + tc->getITBPtr()->demapPage(mbits(va,63,13), part_id, + bits(va,9,9), ctx_id); + break; + case 1: //demap context + if (!ignore) + tc->getITBPtr()->demapContext(part_id, ctx_id); + break; + case 2: + tc->getITBPtr()->demapAll(part_id); + break; + default: + panic("Invalid type for IMMU demap\n"); + } + break; case ASI_DMMU: switch (va) { case 0x30: @@ -1015,6 +1062,40 @@ DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt) goto doMmuWriteError; } break; + 
case ASI_DMMU_DEMAP: + ignore = false; + ctx_id = -1; + part_id = tc->readMiscRegWithEffect(MISCREG_MMU_PART_ID); + switch (bits(va,5,4)) { + case 0: + ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_P_CONTEXT); + break; + case 1: + ctx_id = tc->readMiscRegWithEffect(MISCREG_MMU_S_CONTEXT); + break; + case 3: + ctx_id = 0; + break; + default: + ignore = true; + } + + switch(bits(va,7,6)) { + case 0: // demap page + if (!ignore) + demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id); + break; + case 1: //demap context + if (!ignore) + demapContext(part_id, ctx_id); + break; + case 2: + demapAll(part_id); + break; + default: + panic("Invalid type for IMMU demap\n"); + } + break; default: doMmuWriteError: panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n", diff --git a/src/cpu/exetrace.cc b/src/cpu/exetrace.cc index 3fe40b4c1..dc76ae189 100644 --- a/src/cpu/exetrace.cc +++ b/src/cpu/exetrace.cc @@ -401,7 +401,11 @@ Trace::InstRecord::dump(ostream &outs) diffCcr || diffTl || diffGl || diffAsi || diffPil || diffCwp || diffCansave || diffCanrestore || diffOtherwin || diffCleanwin) - && !((staticInst->machInst & 0xC1F80000) == 0x81D00000)) { + && !((staticInst->machInst & 0xC1F80000) == 0x81D00000) + && !((staticInst->machInst & 0xC1F80000) == 0xC0580000) + && !((staticInst->machInst & 0xC1F80000) == 0xC0000000) + && !((staticInst->machInst & 0xC1F80000) == 0xC0700000)) { + outs << "Differences found between M5 and Legion:"; if (diffPC) outs << " [PC]"; -- cgit v1.2.3 From 6487d358a4588779bdae72e842380a50c780bf82 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Wed, 20 Dec 2006 21:46:16 -0800 Subject: Make sure that variables are always initalized! --HG-- extra : convert_revision : 1e946d9b1e1def36f9b8a73986dabf1b77096327 --- src/cpu/o3/cpu.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc index a5a00015f..18cc87c0b 100644 --- a/src/cpu/o3/cpu.cc +++ b/src/cpu/o3/cpu.cc @@ -117,17 +117,18 @@ FullO3CPU::ActivateThreadEvent::description() template FullO3CPU::DeallocateContextEvent::DeallocateContextEvent() - : Event(&mainEventQueue, CPU_Tick_Pri) + : Event(&mainEventQueue, CPU_Tick_Pri), tid(0), remove(false), cpu(NULL) { } template void FullO3CPU::DeallocateContextEvent::init(int thread_num, - FullO3CPU *thread_cpu) + FullO3CPU *thread_cpu) { tid = thread_num; cpu = thread_cpu; + remove = false; } template @@ -606,7 +607,8 @@ FullO3CPU::suspendContext(int tid) DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid); bool deallocated = deallocateContext(tid, false, 1); // If this was the last thread then unschedule the tick event. - if ((activeThreads.size() == 1 && !deallocated) || activeThreads.size() == 0) + if (activeThreads.size() == 1 && !deallocated || + activeThreads.size() == 0) unscheduleTickEvent(); _status = Idle; } -- cgit v1.2.3 From 9aecfb3e3bfe1b85db9468bad287f22a2eb9bd4e Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Wed, 20 Dec 2006 22:20:11 -0800 Subject: don't use (*activeThreads).begin(), use activeThreads->blah(). Also don't call (*activeThreads).end() over and over. Just call activeThreads->end() once and save the result. Make sure we always check that there are elements in the list before we grab the first one. 
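The shape every stage converges on in the diffs below is the same small loop, sketched here for reference; forEachActiveThread() and frontThread() are made-up wrapper names for illustration, while activeThreads, tid and the unsigned element type follow the surrounding code. The sketch itself is not part of the patch.

    #include <cassert>
    #include <list>

    // Canonical per-thread iteration after this change: take begin() and end()
    // once, then walk the list, using '->' rather than '(*ptr).'.
    void forEachActiveThread(std::list<unsigned> *activeThreads)
    {
        std::list<unsigned>::iterator threads = activeThreads->begin();
        std::list<unsigned>::iterator end = activeThreads->end();   // fetched once, not per test

        while (threads != end) {
            unsigned tid = *threads++;
            (void)tid;   // per-thread work goes here
        }
    }

    // Guarded access to the first active thread.
    unsigned frontThread(std::list<unsigned> *activeThreads)
    {
        assert(!activeThreads->empty());   // check before grabbing the first element
        return activeThreads->front();
    }

Hoisting end() out of the loop is safe for std::list, whose end iterator is not invalidated by insertions or erasures; the change is about avoiding the repeated call and keeping the loops uniform.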
--HG-- extra : convert_revision : d769d8ed52da99532d57a9bbc93e92ddf22b7e58 --- src/cpu/o3/commit_impl.hh | 50 ++++++++------ src/cpu/o3/decode_impl.hh | 22 ++++--- src/cpu/o3/fetch_impl.hh | 33 ++++++---- src/cpu/o3/iew_impl.hh | 40 +++++++----- src/cpu/o3/inst_queue_impl.hh | 14 ++-- src/cpu/o3/lsq_impl.hh | 138 ++++++++++++++++++++++++--------------- src/cpu/o3/rename_impl.hh | 25 +++---- src/cpu/o3/rename_map.cc | 5 ++ src/cpu/o3/rob_impl.hh | 37 ++++++----- src/cpu/ozone/inst_queue_impl.hh | 16 +++-- 10 files changed, 226 insertions(+), 154 deletions(-) (limited to 'src') diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh index f400d757b..d8236f077 100644 --- a/src/cpu/o3/commit_impl.hh +++ b/src/cpu/o3/commit_impl.hh @@ -387,9 +387,12 @@ void DefaultCommit::updateStatus() { // reset ROB changed variable - std::list::iterator threads = (*activeThreads).begin(); - while (threads != (*activeThreads).end()) { + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { unsigned tid = *threads++; + changedROBNumEntries[tid] = false; // Also check if any of the threads has a trap pending @@ -416,9 +419,10 @@ DefaultCommit::setNextStatus() { int squashes = 0; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (commitStatus[tid] == ROBSquashing) { @@ -439,9 +443,10 @@ template bool DefaultCommit::changedROBEntries() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (changedROBNumEntries[tid]) { @@ -563,14 +568,15 @@ DefaultCommit::tick() return; } - if ((*activeThreads).size() <= 0) + if (activeThreads->empty()) return; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); // Check if any of the threads are done squashing. Change the // status if they are done. - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (commitStatus[tid] == ROBSquashing) { @@ -591,9 +597,9 @@ DefaultCommit::tick() markCompletedInsts(); - threads = (*activeThreads).begin(); + threads = activeThreads->begin(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (!rob->isEmpty(tid) && rob->readHeadInst(tid)->readyToCommit()) { @@ -691,9 +697,10 @@ DefaultCommit::commit() //////////////////////////////////// // Check for any possible squashes, handle them first //////////////////////////////////// - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; // Not sure which one takes priority. 
I think if we have @@ -812,9 +819,9 @@ DefaultCommit::commit() } //Check for any activity - threads = (*activeThreads).begin(); + threads = activeThreads->begin(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (changedROBNumEntries[tid]) { @@ -1264,9 +1271,10 @@ template bool DefaultCommit::robDoneSquashing() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (!rob->isDoneSquashing(tid)) @@ -1345,7 +1353,8 @@ DefaultCommit::getCommittingThread() return -1; } } else { - int tid = (*activeThreads).front(); + assert(!activeThreads->empty()); + int tid = activeThreads->front(); if (commitStatus[tid] == Running || commitStatus[tid] == Idle || @@ -1392,9 +1401,10 @@ DefaultCommit::oldestReady() unsigned oldest = 0; bool first = true; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (!rob->isEmpty(tid) && diff --git a/src/cpu/o3/decode_impl.hh b/src/cpu/o3/decode_impl.hh index 80b6cc4c9..26ed40c67 100644 --- a/src/cpu/o3/decode_impl.hh +++ b/src/cpu/o3/decode_impl.hh @@ -424,10 +424,12 @@ template bool DefaultDecode::skidsEmpty() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { - if (!skidBuffer[*threads++].empty()) + while (threads != end) { + unsigned tid = *threads++; + if (!skidBuffer[tid].empty()) return false; } @@ -440,11 +442,10 @@ DefaultDecode::updateStatus() { bool any_unblocking = false; - std::list::iterator threads = (*activeThreads).begin(); - - threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (decodeStatus[tid] == Unblocking) { @@ -597,13 +598,14 @@ DefaultDecode::tick() toRenameIndex = 0; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); sortInsts(); //Check stall and squash signals. 
- while (threads != (*activeThreads).end()) { - unsigned tid = *threads++; + while (threads != end) { + unsigned tid = *threads++; DPRINTF(Decode,"Processing [tid:%i]\n",tid); status_change = checkSignalsAndUpdate(tid) || status_change; diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh index 622259495..fe320fa79 100644 --- a/src/cpu/o3/fetch_impl.hh +++ b/src/cpu/o3/fetch_impl.hh @@ -756,10 +756,10 @@ typename DefaultFetch::FetchStatus DefaultFetch::updateFetchStatus() { //Check Running - std::list::iterator threads = (*activeThreads).begin(); - - while (threads != (*activeThreads).end()) { + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + while (threads != end) { unsigned tid = *threads++; if (fetchStatus[tid] == Running || @@ -819,12 +819,13 @@ template void DefaultFetch::tick() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); bool status_change = false; wroteToTimeBuffer = false; - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; // Check the signals for each thread to determine the proper status @@ -1363,7 +1364,9 @@ DefaultFetch::getFetchingThread(FetchPriority &fetch_priority) return -1; } } else { - int tid = *((*activeThreads).begin()); + std::list::iterator thread = activeThreads->begin(); + assert(thread != activeThreads->end()); + int tid = *thread; if (fetchStatus[tid] == Running || fetchStatus[tid] == IcacheAccessComplete || @@ -1413,9 +1416,10 @@ DefaultFetch::iqCount() { std::priority_queue PQ; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; PQ.push(fromIEW->iewInfo[tid].iqCount); @@ -1443,10 +1447,10 @@ DefaultFetch::lsqCount() { std::priority_queue PQ; + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - std::list::iterator threads = (*activeThreads).begin(); - - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; PQ.push(fromIEW->iewInfo[tid].ldstqCount); @@ -1472,7 +1476,10 @@ template int DefaultFetch::branchCount() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator thread = activeThreads->begin(); + assert(thread != activeThreads->end()); + unsigned tid = *thread; + panic("Branch Count Fetch policy unimplemented\n"); - return *threads; + return 0 * tid; } diff --git a/src/cpu/o3/iew_impl.hh b/src/cpu/o3/iew_impl.hh index 76047b295..d239bd951 100644 --- a/src/cpu/o3/iew_impl.hh +++ b/src/cpu/o3/iew_impl.hh @@ -671,10 +671,12 @@ DefaultIEW::skidCount() { int max=0; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { - unsigned thread_count = skidBuffer[*threads++].size(); + while (threads != end) { + unsigned tid = *threads++; + unsigned thread_count = skidBuffer[tid].size(); if (max < thread_count) max = thread_count; } @@ -686,10 +688,13 @@ template bool DefaultIEW::skidsEmpty() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = 
activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (threads != (*activeThreads).end()) { - if (!skidBuffer[*threads++].empty()) + if (!skidBuffer[tid].empty()) return false; } @@ -702,11 +707,10 @@ DefaultIEW::updateStatus() { bool any_unblocking = false; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - threads = (*activeThreads).begin(); - - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (dispatchStatus[tid] == Unblocking) { @@ -1226,9 +1230,10 @@ DefaultIEW::executeInsts() wbNumInst = 0; wbCycle = 0; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; fetchRedirect[tid] = false; } @@ -1469,11 +1474,12 @@ DefaultIEW::tick() // Free function units marked as being freed this cycle. fuPool->processFreeUnits(); - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); // Check stall and squash signals, dispatch any instructions. - while (threads != (*activeThreads).end()) { - unsigned tid = *threads++; + while (threads != end) { + unsigned tid = *threads++; DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid); @@ -1513,8 +1519,8 @@ DefaultIEW::tick() // nonspeculative instruction. // This is pretty inefficient... - threads = (*activeThreads).begin(); - while (threads != (*activeThreads).end()) { + threads = activeThreads->begin(); + while (threads != end) { unsigned tid = (*threads++); DPRINTF(IEW,"Processing [tid:%i]\n",tid); diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh index 6edb528a9..98b8fa900 100644 --- a/src/cpu/o3/inst_queue_impl.hh +++ b/src/cpu/o3/inst_queue_impl.hh @@ -426,16 +426,18 @@ void InstructionQueue::resetEntries() { if (iqPolicy != Dynamic || numThreads > 1) { - int active_threads = (*activeThreads).size(); + int active_threads = activeThreads->size(); - std::list::iterator threads = (*activeThreads).begin(); - std::list::iterator list_end = (*activeThreads).end(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (threads != list_end) { if (iqPolicy == Partitioned) { - maxEntries[*threads++] = numEntries / active_threads; + maxEntries[tid] = numEntries / active_threads; } else if(iqPolicy == Threshold && active_threads == 1) { - maxEntries[*threads++] = numEntries; + maxEntries[tid] = numEntries; } } } diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh index 6758e51c8..cb40d552e 100644 --- a/src/cpu/o3/lsq_impl.hh +++ b/src/cpu/o3/lsq_impl.hh @@ -244,10 +244,7 @@ void LSQ::resetEntries() { if (lsqPolicy != Dynamic || numThreads > 1) { - int active_threads = (*activeThreads).size(); - - std::list::iterator threads = (*activeThreads).begin(); - std::list::iterator list_end = (*activeThreads).end(); + int active_threads = activeThreads->size(); int maxEntries; @@ -259,8 +256,13 @@ LSQ::resetEntries() maxEntries = LQEntries; } - while (threads != list_end) { - resizeEntries(maxEntries,*threads++); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + 
while (threads != end) { + unsigned tid = *threads++; + + resizeEntries(maxEntries, tid); } } } @@ -285,10 +287,11 @@ template void LSQ::tick() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; + while (threads != end) { + unsigned tid = *threads++; thread[tid].tick(); } @@ -334,10 +337,11 @@ template void LSQ::writebackStores() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; + while (threads != end) { + unsigned tid = *threads++; if (numStoresToWB(tid) > 0) { DPRINTF(Writeback,"[tid:%i] Writing back stores. %i stores " @@ -353,10 +357,12 @@ bool LSQ::violation() { /* Answers: Does Anybody Have a Violation?*/ - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; if (thread[tid].violation()) return true; } @@ -370,10 +376,12 @@ LSQ::getCount() { unsigned total = 0; - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; total += getCount(tid); } @@ -386,10 +394,12 @@ LSQ::numLoads() { unsigned total = 0; - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; total += numLoads(tid); } @@ -402,10 +412,12 @@ LSQ::numStores() { unsigned total = 0; - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; total += thread[tid].numStores(); } @@ -418,10 +430,12 @@ LSQ::numLoadsReady() { unsigned total = 0; - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; total += thread[tid].numLoadsReady(); } @@ -434,10 +448,12 @@ LSQ::numFreeEntries() { unsigned total = 0; - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; total += thread[tid].numFreeEntries(); } @@ -458,11 +474,13 @@ template bool LSQ::isFull() { - std::list::iterator 
active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; - if (! (thread[tid].lqFull() || thread[tid].sqFull()) ) + while (threads != end) { + unsigned tid = *threads++; + + if (!(thread[tid].lqFull() || thread[tid].sqFull())) return false; } @@ -475,7 +493,7 @@ LSQ::isFull(unsigned tid) { //@todo: Change to Calculate All Entries for //Dynamic Policy - if( lsqPolicy == Dynamic ) + if (lsqPolicy == Dynamic) return isFull(); else return thread[tid].lqFull() || thread[tid].sqFull(); @@ -485,10 +503,12 @@ template bool LSQ::lqFull() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; if (!thread[tid].lqFull()) return false; } @@ -512,10 +532,12 @@ template bool LSQ::sqFull() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; if (!sqFull(tid)) return false; } @@ -539,10 +561,12 @@ template bool LSQ::isStalled() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; if (!thread[tid].isStalled()) return false; } @@ -564,13 +588,15 @@ template bool LSQ::hasStoresToWB() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - if ((*activeThreads).empty()) + if (threads == end) return false; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; + while (threads != end) { + unsigned tid = *threads++; + if (!hasStoresToWB(tid)) return false; } @@ -582,10 +608,12 @@ template bool LSQ::willWB() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; if (!willWB(tid)) return false; } @@ -597,10 +625,12 @@ template void LSQ::dumpInsts() { - std::list::iterator active_threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (active_threads != (*activeThreads).end()) { - unsigned tid = *active_threads++; thread[tid].dumpInsts(); } } diff --git a/src/cpu/o3/rename_impl.hh b/src/cpu/o3/rename_impl.hh index 248d7deb6..3a8e503a0 100644 --- a/src/cpu/o3/rename_impl.hh +++ b/src/cpu/o3/rename_impl.hh @@ -412,10 +412,11 @@ DefaultRename::tick() sortInsts(); - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator 
end = activeThreads->end(); // Check stall and squash signals. - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; DPRINTF(Rename, "Processing [tid:%i]\n", tid); @@ -434,9 +435,9 @@ DefaultRename::tick() cpu->activityThisCycle(); } - threads = (*activeThreads).begin(); + threads = activeThreads->begin(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; // If we committed this cycle then doneSeqNum will be > 0 @@ -764,10 +765,13 @@ template bool DefaultRename::skidsEmpty() { - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { - if (!skidBuffer[*threads++].empty()) + while (threads != end) { + unsigned tid = *threads++; + + if (!skidBuffer[tid].empty()) return false; } @@ -780,11 +784,10 @@ DefaultRename::updateStatus() { bool any_unblocking = false; - std::list::iterator threads = (*activeThreads).begin(); - - threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (renameStatus[tid] == Unblocking) { diff --git a/src/cpu/o3/rename_map.cc b/src/cpu/o3/rename_map.cc index befbc3e8a..620daf691 100644 --- a/src/cpu/o3/rename_map.cc +++ b/src/cpu/o3/rename_map.cc @@ -180,6 +180,8 @@ SimpleRenameMap::rename(RegIndex arch_reg) // Subtract off the base offset for miscellaneous registers. arch_reg = arch_reg - numLogicalRegs; + DPRINTF(Rename, "Renamed misc reg %d\n", arch_reg); + // No renaming happens to the misc. registers. 
They are // simply the registers that come after all the physical // registers; thus take the base architected register and add @@ -194,6 +196,9 @@ SimpleRenameMap::rename(RegIndex arch_reg) assert(renamed_reg < numPhysicalRegs + numMiscRegs); } + DPRINTF(Rename, "Renamed reg %d to physical reg %d old mapping was %d\n", + arch_reg, renamed_reg, prev_reg); + return RenameInfo(renamed_reg, prev_reg); } diff --git a/src/cpu/o3/rob_impl.hh b/src/cpu/o3/rob_impl.hh index fab114a74..fde636754 100644 --- a/src/cpu/o3/rob_impl.hh +++ b/src/cpu/o3/rob_impl.hh @@ -155,16 +155,18 @@ void ROB::resetEntries() { if (robPolicy != Dynamic || numThreads > 1) { - int active_threads = (*activeThreads).size(); + int active_threads = activeThreads->size(); - std::list::iterator threads = (*activeThreads).begin(); - std::list::iterator list_end = (*activeThreads).end(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (threads != list_end) { if (robPolicy == Partitioned) { - maxEntries[*threads++] = numEntries / active_threads; + maxEntries[tid] = numEntries / active_threads; } else if (robPolicy == Threshold && active_threads == 1) { - maxEntries[*threads++] = numEntries; + maxEntries[tid] = numEntries; } } } @@ -318,9 +320,10 @@ bool ROB::canCommit() { //@todo: set ActiveThreads through ROB or CPU - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (isHeadReady(tid)) { @@ -432,22 +435,23 @@ ROB::updateHead() bool first_valid = true; // @todo: set ActiveThreads through ROB or CPU - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { - unsigned thread_num = *threads++; + while (threads != end) { + unsigned tid = *threads++; - if (instList[thread_num].empty()) + if (instList[tid].empty()) continue; if (first_valid) { - head = instList[thread_num].begin(); + head = instList[tid].begin(); lowest_num = (*head)->seqNum; first_valid = false; continue; } - InstIt head_thread = instList[thread_num].begin(); + InstIt head_thread = instList[tid].begin(); DynInstPtr head_inst = (*head_thread); @@ -472,9 +476,10 @@ ROB::updateTail() tail = instList[0].end(); bool first_valid = true; - std::list::iterator threads = (*activeThreads).begin(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); - while (threads != (*activeThreads).end()) { + while (threads != end) { unsigned tid = *threads++; if (instList[tid].empty()) { diff --git a/src/cpu/ozone/inst_queue_impl.hh b/src/cpu/ozone/inst_queue_impl.hh index 32a940241..84f2b2a19 100644 --- a/src/cpu/ozone/inst_queue_impl.hh +++ b/src/cpu/ozone/inst_queue_impl.hh @@ -342,16 +342,18 @@ void InstQueue::resetEntries() { if (iqPolicy != Dynamic || numThreads > 1) { - int active_threads = (*activeThreads).size(); + int active_threads = activeThreads->size(); - list::iterator threads = (*activeThreads).begin(); - list::iterator list_end = (*activeThreads).end(); + std::list::iterator threads = activeThreads->begin(); + std::list::iterator end = activeThreads->end(); + + while (threads != end) { + unsigned tid = *threads++; - while (threads != 
list_end) { if (iqPolicy == Partitioned) { - maxEntries[*threads++] = numEntries / active_threads; - } else if(iqPolicy == Threshold && active_threads == 1) { - maxEntries[*threads++] = numEntries; + maxEntries[tid] = numEntries / active_threads; + } else if (iqPolicy == Threshold && active_threads == 1) { + maxEntries[tid] = numEntries; } } } -- cgit v1.2.3 From 2cb2b508020bfcdaccbf7225f621bcdfd330c7bd Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Thu, 21 Dec 2006 15:49:16 -0800 Subject: move the swig initialization calls from src/sim/main.cc to src/python/swig/init.cc so that it's not as easy to forget about it when you add a new swig module. --HG-- extra : convert_revision : 5cc4ec0838e636aa761901effb8986de58d23e03 --- src/SConscript | 1 + src/python/swig/init.hh | 36 ++++++++++++++++++++++++++++++++++++ src/sim/main.cc | 9 ++------- 3 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 src/python/swig/init.hh (limited to 'src') diff --git a/src/SConscript b/src/SConscript index 178f5b345..6f323b938 100644 --- a/src/SConscript +++ b/src/SConscript @@ -128,6 +128,7 @@ base_sources = Split(''' mem/cache/cache_builder.cc + python/swig/init.cc python/swig/debug_wrap.cc python/swig/main_wrap.cc diff --git a/src/python/swig/init.hh b/src/python/swig/init.hh new file mode 100644 index 000000000..c376f5d59 --- /dev/null +++ b/src/python/swig/init.hh @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2000-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Nathan Binkert + */ + +#ifndef __PYTHON_SWIG_INIT_HH__ +#define __PYTHON_SWIG_INIT_HH__ + +void init_swig(); + +#endif // __PYTHON_SWIG_INIT_HH__ diff --git a/src/sim/main.cc b/src/sim/main.cc index 17209ac20..04dbe1ef4 100644 --- a/src/sim/main.cc +++ b/src/sim/main.cc @@ -60,6 +60,7 @@ #include "cpu/smt.hh" #include "mem/mem_object.hh" #include "mem/port.hh" +#include "python/swig/init.hh" #include "sim/async.hh" #include "sim/builder.hh" #include "sim/host.hh" @@ -117,11 +118,6 @@ abortHandler(int sigtype) #endif } -extern "C" { -void init_main(); -void init_debug(); -} - int main(int argc, char **argv) { @@ -159,8 +155,7 @@ main(int argc, char **argv) PySys_SetArgv(argc, argv); // initialize SWIG modules - init_main(); - init_debug(); + init_swig(); PyRun_SimpleString("import m5.main"); PyRun_SimpleString("m5.main.main()"); -- cgit v1.2.3 From 3f03e5f65658d78406ac545c041faa602b837dac Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Thu, 21 Dec 2006 15:58:38 -0800 Subject: Create a wrapper function to more easily add swig stuff to the build --HG-- extra : convert_revision : 3aaf540a9e314a88a8945579398f0d79aa85d5cf --- src/python/SConscript | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/python/SConscript b/src/python/SConscript index be6248bab..1e49a18b2 100644 --- a/src/python/SConscript +++ b/src/python/SConscript @@ -98,18 +98,15 @@ pyzip_files.append('m5/defines.py') pyzip_files.append('m5/info.py') pyzip_files.append(join(env['ROOT'], 'util/pbs/jobfile.py')) -env.Command(['swig/debug_wrap.cc', 'm5/internal/debug.py'], - 'swig/debug.i', - '$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} ' - '-o ${TARGETS[0]} $SOURCES') - -env.Command(['swig/main_wrap.cc', 'm5/internal/main.py'], - 'swig/main.i', - '$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} ' - '-o ${TARGETS[0]} $SOURCES') - -pyzip_dep_files.append('m5/internal/debug.py') -pyzip_dep_files.append('m5/internal/main.py') +def swig_it(basename): + env.Command(['swig/%s_wrap.cc' % basename, 'm5/internal/%s.py' % basename], + 'swig/%s.i' % basename, + '$SWIG $SWIGFLAGS -outdir ${TARGETS[1].dir} ' + '-o ${TARGETS[0]} $SOURCES') + pyzip_dep_files.append('m5/internal/%s.py' % basename) + +swig_it('main') +swig_it('debug') # Action function to build the zip archive. Uses the PyZipFile module # included in the standard Python library. 
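With this helper in place, hooking another SWIG interface into the build comes down to one swig_it() call per module, plus listing the generated wrapper among the C++ sources in src/SConscript. A rough usage sketch (the 'event' module named here is only an illustration; the next changeset in this series adds it for real):

    # src/python/SConscript, after the swig_it definition
    swig_it('event')      # generates swig/event_wrap.cc and m5/internal/event.py

    # src/SConscript must still list the generated wrapper by hand:
    #   python/swig/event_wrap.cc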
-- cgit v1.2.3 From ba191d85c274934142430c1522a10ecdbc78d4f6 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Thu, 21 Dec 2006 22:34:19 -0800 Subject: style --HG-- extra : convert_revision : 6bbaaa88a608081eebf706ff30293f38729415aa --- src/cpu/o3/lsq_impl.hh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh index cb40d552e..fb738f7c9 100644 --- a/src/cpu/o3/lsq_impl.hh +++ b/src/cpu/o3/lsq_impl.hh @@ -464,7 +464,7 @@ template unsigned LSQ::numFreeEntries(unsigned tid) { - //if( lsqPolicy == Dynamic ) + //if (lsqPolicy == Dynamic) //return numFreeEntries(); //else return thread[tid].numFreeEntries(); @@ -522,7 +522,7 @@ LSQ::lqFull(unsigned tid) { //@todo: Change to Calculate All Entries for //Dynamic Policy - if( lsqPolicy == Dynamic ) + if (lsqPolicy == Dynamic) return lqFull(); else return thread[tid].lqFull(); @@ -551,7 +551,7 @@ LSQ::sqFull(unsigned tid) { //@todo: Change to Calculate All Entries for //Dynamic Policy - if( lsqPolicy == Dynamic ) + if (lsqPolicy == Dynamic) return sqFull(); else return thread[tid].sqFull(); @@ -578,7 +578,7 @@ template bool LSQ::isStalled(unsigned tid) { - if( lsqPolicy == Dynamic ) + if (lsqPolicy == Dynamic) return isStalled(); else return thread[tid].isStalled(); -- cgit v1.2.3 From ecd1420341f6d96bd485cedf9b08a809d7e99176 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Thu, 21 Dec 2006 22:38:50 -0800 Subject: Expose the C++ event queue to python via the python function m5.internal.event.create(). It takes a python object and a Tick and calls process() when the Tick occurs. --HG-- extra : convert_revision : 5e4c9728982b206163ff51e6850a1497d85ad7a3 --- src/SConscript | 2 ++ src/python/SConscript | 1 + src/python/swig/event.i | 54 +++++++++++++++++++++++++++++++++++++ src/python/swig/pyevent.cc | 66 ++++++++++++++++++++++++++++++++++++++++++++++ src/python/swig/pyevent.hh | 48 +++++++++++++++++++++++++++++++++ 5 files changed, 171 insertions(+) create mode 100644 src/python/swig/event.i create mode 100644 src/python/swig/pyevent.cc create mode 100644 src/python/swig/pyevent.hh (limited to 'src') diff --git a/src/SConscript b/src/SConscript index 6f323b938..229418703 100644 --- a/src/SConscript +++ b/src/SConscript @@ -131,6 +131,8 @@ base_sources = Split(''' python/swig/init.cc python/swig/debug_wrap.cc python/swig/main_wrap.cc + python/swig/event_wrap.cc + python/swig/pyevent.cc sim/builder.cc sim/debug.cc diff --git a/src/python/SConscript b/src/python/SConscript index 1e49a18b2..df1464809 100644 --- a/src/python/SConscript +++ b/src/python/SConscript @@ -107,6 +107,7 @@ def swig_it(basename): swig_it('main') swig_it('debug') +swig_it('event') # Action function to build the zip archive. Uses the PyZipFile module # included in the standard Python library. diff --git a/src/python/swig/event.i b/src/python/swig/event.i new file mode 100644 index 000000000..554c9fa0e --- /dev/null +++ b/src/python/swig/event.i @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Nathan Binkert + */ + +%module event + +%{ +#include "python/swig/pyevent.hh" + +inline void +create(PyObject *object, Tick when) +{ + new PythonEvent(object, when); +} +%} + +%include "stdint.i" +%include "sim/host.hh" + +%inline %{ +extern void create(PyObject *object, Tick when); +%} + +%wrapper %{ +// fix up module name to reflect the fact that it's inside the m5 package +#undef SWIG_name +#define SWIG_name "m5.internal._event" +%} diff --git a/src/python/swig/pyevent.cc b/src/python/swig/pyevent.cc new file mode 100644 index 000000000..6fb7d3f17 --- /dev/null +++ b/src/python/swig/pyevent.cc @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Nathan Binkert + */ + +#include + +#include "python/swig/pyevent.hh" + +PythonEvent::PythonEvent(PyObject *obj, Tick when, Priority priority) + : Event(&mainEventQueue, priority), object(obj) +{ + if (object == NULL) + panic("Passed in invalid object"); + + Py_INCREF(object); + + setFlags(AutoDelete); + schedule(when); +} + +PythonEvent::~PythonEvent() +{ + Py_DECREF(object); +} + +void +PythonEvent::process() +{ + PyObject *result; + + result = PyObject_CallMethod(object, "process", ""); + + if (result) { + // Nothing to do just decrement the reference count + Py_DECREF(result); + } else { + // Somethign should be done to signal back to the main interpreter + // that there's been an exception. + } +} diff --git a/src/python/swig/pyevent.hh b/src/python/swig/pyevent.hh new file mode 100644 index 000000000..16af85a84 --- /dev/null +++ b/src/python/swig/pyevent.hh @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Nathan Binkert + */ + +#ifndef __PYTHON_SWIG_PYEVENT_HH__ +#define __PYTHON_SWIG_PYEVENT_HH__ + +#include "sim/eventq.hh" + +class PythonEvent : public Event +{ + private: + PyObject *object; + + public: + PythonEvent(PyObject *obj, Tick when, Priority priority = Default_Pri); + ~PythonEvent(); + + virtual void process(); +}; + +#endif // __PYTHON_SWIG_PYEVENT_HH__ -- cgit v1.2.3 From 139dcbe0889484cbb5a944f4abd0da76608d71b7 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Thu, 21 Dec 2006 22:41:08 -0800 Subject: Fix copyright --HG-- extra : convert_revision : 8ad7824885a5c4da80175c47ba5288aab55b06ca --- src/python/swig/debug.i | 30 ++++++++++++++++++++++++++++++ src/python/swig/init.hh | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/swig/debug.i b/src/python/swig/debug.i index 8da2974ca..b542e9f82 100644 --- a/src/python/swig/debug.i +++ b/src/python/swig/debug.i @@ -1,3 +1,33 @@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Nathan Binkert + */ + %module debug %{ diff --git a/src/python/swig/init.hh b/src/python/swig/init.hh index c376f5d59..23d2c19a9 100644 --- a/src/python/swig/init.hh +++ b/src/python/swig/init.hh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2005 The Regents of The University of Michigan + * Copyright (c) 2006 The Regents of The University of Michigan * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without -- cgit v1.2.3 From e68a87e7fa3d9e75afb4acc96c0ac7c059bdedec Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Sun, 24 Dec 2006 14:06:56 -0800 Subject: remove some output formatting stuff that we don't use --HG-- extra : convert_revision : 367917499d3d7aebd0a91dad28c915bc85def624 --- src/base/cprintf.hh | 30 ------------------------------ src/unittest/cprintftest.cc | 3 --- 2 files changed, 33 deletions(-) (limited to 'src') diff --git a/src/base/cprintf.hh b/src/base/cprintf.hh index 6b2a77f90..9967b0578 100644 --- a/src/base/cprintf.hh +++ b/src/base/cprintf.hh @@ -165,36 +165,6 @@ __csprintf(const std::string &format, ArgList &args) #define csprintf(args...) \ __csprintf__(args, cp::ArgListNull()) -template -inline ArgList & -operator<<(ArgList &list, const T &data) -{ - list.append(data); - return list; -} - -inline ArgList & -operator<<(std::ostream &str, ArgList &list) -{ - list.stream = &str; - return list; -} - -class ArgListTemp -{ - private: - std::string format; - ArgList *args; - - public: - ArgListTemp(const std::string &f) : format(f) { args = new ArgList; } - ~ArgListTemp() { args->dump(format); delete args; } - - operator ArgList *() { return args; } -}; - -#define cformat(format) \ - (*((cp::ArgList *)cp::ArgListTemp(format))) } #endif // __CPRINTF_HH__ diff --git a/src/unittest/cprintftest.cc b/src/unittest/cprintftest.cc index a05426356..a454be05e 100644 --- a/src/unittest/cprintftest.cc +++ b/src/unittest/cprintftest.cc @@ -48,9 +48,6 @@ main() cprintf("%%s%-10s %c he went home \'\"%d %#o %#x %1.5f %1.2E\n", "hello", 'A', 1, 0xff, 0xfffffffffffffULL, 3.141592653589, 1.1e10); - cout << cformat("%s %#x %s\n") << "hello" << 0 << "foo 0\n"; - cerr << cformat("%s %#x\n") << "hello" << 1 << "foo 1\n"; - cprintf("another test\n"); stringstream buffer; -- cgit v1.2.3 From 2d029fe584c4d6ea356b653c73a50c4271188698 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Sun, 24 Dec 2006 15:15:12 -0800 Subject: Make sure that all of the bits in the result are set to some value. --HG-- extra : convert_revision : 1f1700fd77531cbb8cfad7f04ce2b573fcdefdab --- src/dev/alpha/tsunami_cchip.cc | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src') diff --git a/src/dev/alpha/tsunami_cchip.cc b/src/dev/alpha/tsunami_cchip.cc index eb51d4e49..15c47984b 100644 --- a/src/dev/alpha/tsunami_cchip.cc +++ b/src/dev/alpha/tsunami_cchip.cc @@ -88,6 +88,8 @@ TsunamiCChip::read(PacketPtr pkt) switch (pkt->getSize()) { case sizeof(uint64_t): + pkt->set(0); + if (daddr & TSDEV_CC_BDIMS) { pkt->set(dim[(daddr >> 4) & 0x3F]); -- cgit v1.2.3 From 0bd751848096d7446075e4c8aec43b1798deda67 Mon Sep 17 00:00:00 2001 From: Kevin Lim Date: Tue, 26 Dec 2006 01:43:18 -0500 Subject: Remove some #if FULL_SYSTEMs so MP stuff works even in SE mode. 
--HG-- extra : convert_revision : 5c334ec806305451b3883c7fd0ed9cd695c038bc --- src/cpu/o3/commit_impl.hh | 7 +++---- src/cpu/o3/iew_impl.hh | 2 -- src/cpu/o3/lsq_unit.hh | 2 -- 3 files changed, 3 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh index d8236f077..c3c4983c5 100644 --- a/src/cpu/o3/commit_impl.hh +++ b/src/cpu/o3/commit_impl.hh @@ -988,20 +988,19 @@ DefaultCommit::commitHead(DynInstPtr &head_inst, unsigned inst_num) "instruction [sn:%lli] at the head of the ROB, PC %#x.\n", head_inst->seqNum, head_inst->readPC()); -#if !FULL_SYSTEM // Hack to make sure syscalls/memory barriers/quiesces // aren't executed until all stores write back their data. // This direct communication shouldn't be used for // anything other than this. - if (inst_num > 0 || iewStage->hasStoresToWB()) -#else if ((head_inst->isMemBarrier() || head_inst->isWriteBarrier() || head_inst->isQuiesce()) && iewStage->hasStoresToWB()) -#endif { DPRINTF(Commit, "Waiting for all stores to writeback.\n"); return false; + } else if (inst_num > 0) { + DPRINTF(Commit, "Waiting to become head of commit.\n"); + return false; } toIEW->commitInfo[tid].nonSpecSeqNum = head_inst->seqNum; diff --git a/src/cpu/o3/iew_impl.hh b/src/cpu/o3/iew_impl.hh index d239bd951..a8962f2f7 100644 --- a/src/cpu/o3/iew_impl.hh +++ b/src/cpu/o3/iew_impl.hh @@ -1124,13 +1124,11 @@ DefaultIEW::dispatchInsts(unsigned tid) } toRename->iewInfo[tid].dispatchedToLSQ++; -#if FULL_SYSTEM } else if (inst->isMemBarrier() || inst->isWriteBarrier()) { // Same as non-speculative stores. inst->setCanCommit(); instQueue.insertBarrier(inst); add_to_iq = false; -#endif } else if (inst->isNonSpeculative()) { DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction " "encountered, skipping.\n", tid); diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh index a2e11173e..14f9d5031 100644 --- a/src/cpu/o3/lsq_unit.hh +++ b/src/cpu/o3/lsq_unit.hh @@ -509,7 +509,6 @@ LSQUnit::read(Request *req, T &data, int load_idx) "storeHead: %i addr: %#x\n", load_idx, store_idx, storeHead, req->getPaddr()); -#if FULL_SYSTEM if (req->isLocked()) { // Disable recording the result temporarily. 
Writing to misc // regs normally updates the result, but this is not the @@ -518,7 +517,6 @@ LSQUnit::read(Request *req, T &data, int load_idx) TheISA::handleLockedRead(load_inst.get(), req); load_inst->recordResult = true; } -#endif while (store_idx != -1) { // End once we've reached the top of the LSQ -- cgit v1.2.3 From 9e90bfafb5b464dacc983c1c513f264d538bd9fd Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Wed, 27 Dec 2006 10:52:25 -0800 Subject: No need to use NULL, just use 0 The result of operator= cannot be an l-value --HG-- extra : convert_revision : df97a57f466e3498bd5a29638cb9912c7f3e1bd4 --- src/base/refcnt.hh | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/base/refcnt.hh b/src/base/refcnt.hh index 6672d4a5f..d1663ad72 100644 --- a/src/base/refcnt.hh +++ b/src/base/refcnt.hh @@ -28,10 +28,8 @@ * Authors: Nathan Binkert */ -#ifndef __REFCNT_HH__ -#define __REFCNT_HH__ - -#include //For the NULL macro definition +#ifndef __BASE_REFCNT_HH__ +#define __BASE_REFCNT_HH__ class RefCounted { @@ -77,7 +75,7 @@ class RefCountingPtr public: - RefCountingPtr() : data(NULL) {} + RefCountingPtr() : data(0) {} RefCountingPtr(T *data) { copy(data); } RefCountingPtr(const RefCountingPtr &r) { copy(r.data); } ~RefCountingPtr() { del(); } @@ -90,8 +88,8 @@ class RefCountingPtr const T &operator*() const { return *data; } const T *get() const { return data; } - RefCountingPtr &operator=(T *p) { set(p); return *this; } - RefCountingPtr &operator=(const RefCountingPtr &r) + const RefCountingPtr &operator=(T *p) { set(p); return *this; } + const RefCountingPtr &operator=(const RefCountingPtr &r) { return operator=(r.data); } bool operator!() const { return data == 0; } @@ -122,4 +120,4 @@ template bool operator!=(const T &l, const RefCountingPtr &r) { return l != r.get(); } -#endif // __REFCNT_HH__ +#endif // __BASE_REFCNT_HH__ -- cgit v1.2.3 From b6dc902f6acfd6cb855f73f97cbaf721c1b24a43 Mon Sep 17 00:00:00 2001 From: Ali Saidi Date: Wed, 27 Dec 2006 14:32:26 -0500 Subject: Change MemoryAccess dprintfs to print the data as well --HG-- extra : convert_revision : 51336fffa5e51a810ad2f6eb29b91c1bfd67824b --- src/mem/physical.cc | 51 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/mem/physical.cc b/src/mem/physical.cc index 6610e547d..7d616a4e5 100644 --- a/src/mem/physical.cc +++ b/src/mem/physical.cc @@ -42,6 +42,7 @@ #include "arch/isa_traits.hh" #include "base/misc.hh" #include "config/full_system.hh" +#include "mem/packet_access.hh" #include "mem/physical.hh" #include "sim/builder.hh" #include "sim/eventq.hh" @@ -203,18 +204,60 @@ PhysicalMemory::doFunctionalAccess(PacketPtr pkt) if (pkt->req->isLocked()) { trackLoadLocked(pkt->req); } - DPRINTF(MemoryAccess, "Performing Read of size %i on address 0x%x\n", - pkt->getSize(), pkt->getAddr()); memcpy(pkt->getPtr(), pmemAddr + pkt->getAddr() - params()->addrRange.start, pkt->getSize()); +#if TRACING_ON + switch (pkt->getSize()) { + case sizeof(uint64_t): + DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + case sizeof(uint32_t): + DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + case sizeof(uint16_t): + DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + case sizeof(uint8_t): + 
DPRINTF(MemoryAccess, "Read of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + default: + DPRINTF(MemoryAccess, "Read of size %i on address 0x%x\n", + pkt->getSize(), pkt->getAddr()); + } +#endif } else if (pkt->isWrite()) { if (writeOK(pkt->req)) { - DPRINTF(MemoryAccess, "Performing Write of size %i on address 0x%x\n", - pkt->getSize(), pkt->getAddr()); memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start, pkt->getPtr(), pkt->getSize()); +#if TRACING_ON + switch (pkt->getSize()) { + case sizeof(uint64_t): + DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + case sizeof(uint32_t): + DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + case sizeof(uint16_t): + DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + case sizeof(uint8_t): + DPRINTF(MemoryAccess, "Write of size %i on address 0x%x data 0x%x\n", + pkt->getSize(), pkt->getAddr(),pkt->get()); + break; + default: + DPRINTF(MemoryAccess, "Write of size %i on address 0x%x\n", + pkt->getSize(), pkt->getAddr()); + } +#endif } } else if (pkt->isInvalidate()) { -- cgit v1.2.3 From ff88f3b13ab03b1d6e8f371298843cd3b4d0b8cb Mon Sep 17 00:00:00 2001 From: Ali Saidi Date: Wed, 27 Dec 2006 14:35:23 -0500 Subject: Compare legion and m5 tlbs for differences Only print faults instructions that aren't traps or faulting loads src/cpu/exetrace.cc: Compare the legion and m5 tlbs and printout any differences Only show differences if the instruction isn't a trap and isn't a memory operation that changes the trap level (a fault) src/cpu/m5legion_interface.h: update the m5<->legion interface to add tlb data --HG-- extra : convert_revision : 6963b64ca1012604e6b1d3c5e0e5f5282fd0164e --- src/cpu/exetrace.cc | 34 ++++++++++++++++++++++++++++++---- src/cpu/m5legion_interface.h | 5 ++++- 2 files changed, 34 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cpu/exetrace.cc b/src/cpu/exetrace.cc index dc76ae189..352a11958 100644 --- a/src/cpu/exetrace.cc +++ b/src/cpu/exetrace.cc @@ -312,6 +312,7 @@ Trace::InstRecord::dump(ostream &outs) bool diffCanrestore = false; bool diffOtherwin = false; bool diffCleanwin = false; + bool diffTlb = false; Addr m5Pc, lgnPc; @@ -395,16 +396,23 @@ Trace::InstRecord::dump(ostream &outs) if(shared_data->cleanwin != thread->readMiscReg(MISCREG_CLEANWIN)) diffCleanwin = true; + for (int i = 0; i < 64; i++) { + if (shared_data->itb[i] != thread->getITBPtr()->TteRead(i)) + diffTlb = true; + if (shared_data->dtb[i] != thread->getDTBPtr()->TteRead(i)) + diffTlb = true; + } + if ((diffPC || diffCC || diffInst || diffRegs || diffTpc || diffTnpc || diffTstate || diffTt || diffHpstate || diffHtstate || diffHtba || diffPstate || diffY || diffCcr || diffTl || diffGl || diffAsi || diffPil || diffCwp || diffCansave || diffCanrestore || - diffOtherwin || diffCleanwin) + diffOtherwin || diffCleanwin || diffTlb) && !((staticInst->machInst & 0xC1F80000) == 0x81D00000) - && !((staticInst->machInst & 0xC1F80000) == 0xC0580000) - && !((staticInst->machInst & 0xC1F80000) == 0xC0000000) - && !((staticInst->machInst & 0xC1F80000) == 0xC0700000)) { + && !(((staticInst->machInst & 0xC0000000) == 0xC0000000) + && shared_data->tl == thread->readMiscReg(MISCREG_TL) + 1) + ) { outs << "Differences found between M5 and Legion:"; if (diffPC) @@ -453,6 +461,8 @@ 
Trace::InstRecord::dump(ostream &outs) outs << " [Otherwin]"; if (diffCleanwin) outs << " [Cleanwin]"; + if (diffTlb) + outs << " [Tlb]"; outs << endl << endl; outs << right << setfill(' ') << setw(15) @@ -577,6 +587,22 @@ Trace::InstRecord::dump(ostream &outs) << endl;*/ } } + printColumnLabels(outs); + char label[8]; + for (int x = 0; x < 64; x++) { + if (shared_data->itb[x] != ULL(0xFFFFFFFFFFFFFFFF) || + thread->getITBPtr()->TteRead(x) != ULL(0xFFFFFFFFFFFFFFFF)) { + sprintf(label, "I-TLB:%02d", x); + printRegPair(outs, label, thread->getITBPtr()->TteRead(x), shared_data->itb[x]); + } + } + for (int x = 0; x < 64; x++) { + if (shared_data->dtb[x] != ULL(0xFFFFFFFFFFFFFFFF) || + thread->getDTBPtr()->TteRead(x) != ULL(0xFFFFFFFFFFFFFFFF)) { + sprintf(label, "D-TLB:%02d", x); + printRegPair(outs, label, thread->getDTBPtr()->TteRead(x), shared_data->dtb[x]); + } + } thread->getITBPtr()->dumpAll(); thread->getDTBPtr()->dumpAll(); diff --git a/src/cpu/m5legion_interface.h b/src/cpu/m5legion_interface.h index bfb88485a..c3ba5986e 100644 --- a/src/cpu/m5legion_interface.h +++ b/src/cpu/m5legion_interface.h @@ -30,7 +30,7 @@ #include -#define VERSION 0xA1000006 +#define VERSION 0xA1000007 #define OWN_M5 0x000000AA #define OWN_LEGION 0x00000055 @@ -72,6 +72,9 @@ typedef struct { uint8_t otherwin; uint8_t cleanwin; + uint64_t itb[64]; + uint64_t dtb[64]; + } SharedData; /** !!! ^^^ Increment VERSION on change ^^^ !!! **/ -- cgit v1.2.3 From ba14d6d0e1debea686681e5738bcdb041522dca0 Mon Sep 17 00:00:00 2001 From: Ali Saidi Date: Wed, 27 Dec 2006 14:38:07 -0500 Subject: Bug fixes in the TLB Make our replacement algorithm same as legion (although not same as the spec) itb should be 64 entries not 48 src/arch/sparc/tlb.cc: Bug fixes in the TLB Make our replacement algorithm same as legion (although not same as the spec) src/arch/sparc/tlb.hh: Make our replacement algorithm same as legion (although not same as the spec) src/python/m5/objects/SparcTLB.py: itb should be 64 entries too --HG-- extra : convert_revision : 1b5cb3597091e3cfe293e94f6f2219b1e621c35f --- src/arch/sparc/tlb.cc | 115 +++++++++++++++++++++++++++++--------- src/arch/sparc/tlb.hh | 10 +++- src/python/m5/objects/SparcTLB.py | 2 +- 3 files changed, 97 insertions(+), 30 deletions(-) (limited to 'src') diff --git a/src/arch/sparc/tlb.cc b/src/arch/sparc/tlb.cc index 327ab659b..40542a9a6 100644 --- a/src/arch/sparc/tlb.cc +++ b/src/arch/sparc/tlb.cc @@ -45,7 +45,8 @@ namespace SparcISA { TLB::TLB(const std::string &name, int s) - : SimObject(name), size(s), usedEntries(0), cacheValid(false) + : SimObject(name), size(s), usedEntries(0), lastReplaced(0), + cacheValid(false) { // To make this work you'll have to change the hypervisor and OS if (size > 64) @@ -53,13 +54,16 @@ TLB::TLB(const std::string &name, int s) tlb = new TlbEntry[size]; memset(tlb, 0, sizeof(TlbEntry) * size); + + for (int x = 0; x < size; x++) + freeList.push_back(&tlb[x]); } void TLB::clearUsedBits() { MapIter i; - for (i = lookupTable.begin(); i != lookupTable.end();) { + for (i = lookupTable.begin(); i != lookupTable.end(); i++) { TlbEntry *t = i->second; if (!t->pte.locked()) { t->used = false; @@ -77,32 +81,76 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real, MapIter i; TlbEntry *new_entry = NULL; + TlbRange tr; int x; cacheValid = false; + tr.va = va; + tr.size = PTE.size() - 1; + tr.contextId = context_id; + tr.partitionId = partition_id; + tr.real = real; + + + DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d 
entryid=%d\n", + va, PTE.paddr(), partition_id, context_id, (int)real, entry); + + // Demap any entry that conflicts + i = lookupTable.find(tr); + if (i != lookupTable.end()) { + i->second->valid = false; + if (i->second->used) { + i->second->used = false; + usedEntries--; + } + freeList.push_front(i->second); + DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n", + i->second); + lookupTable.erase(i); + } - DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d\n", - va, PTE.paddr(), partition_id, context_id, (int)real); if (entry != -1) { assert(entry < size && entry >= 0); new_entry = &tlb[entry]; } else { + if (!freeList.empty()) { + new_entry = freeList.front(); + } else { + x = lastReplaced; + do { + ++x; + if (x == size) + x = 0; + if (x == lastReplaced) + goto insertAllLocked; + } while (tlb[x].pte.locked()); + lastReplaced = x; + new_entry = &tlb[x]; + lookupTable.erase(new_entry->range); + } + /* for (x = 0; x < size; x++) { if (!tlb[x].valid || !tlb[x].used) { new_entry = &tlb[x]; break; } - } + }*/ } +insertAllLocked: // Update the last ently if their all locked - if (!new_entry) + if (!new_entry) { new_entry = &tlb[size-1]; + lookupTable.erase(new_entry->range); + } + + freeList.remove(new_entry); + DPRINTF(TLB, "Using entry: %#X\n", new_entry); assert(PTE.valid()); new_entry->range.va = va; - new_entry->range.size = PTE.size(); + new_entry->range.size = PTE.size() - 1; new_entry->range.partitionId = partition_id; new_entry->range.contextId = context_id; new_entry->range.real = real; @@ -112,17 +160,6 @@ TLB::insert(Addr va, int partition_id, int context_id, bool real, usedEntries++; - // Demap any entry that conflicts - i = lookupTable.find(new_entry->range); - if (i != lookupTable.end()) { - i->second->valid = false; - if (i->second->used) { - i->second->used = false; - usedEntries--; - } - DPRINTF(TLB, "TLB: Found conflicting entry, deleting it\n"); - lookupTable.erase(i); - } i = lookupTable.insert(new_entry->range, new_entry); assert(i != lookupTable.end()); @@ -219,6 +256,8 @@ TLB::demapPage(Addr va, int partition_id, bool real, int context_id) i->second->used = false; usedEntries--; } + freeList.push_front(i->second); + DPRINTF(TLB, "Freeing TLB entry : %#X\n", i->second); lookupTable.erase(i); } } @@ -233,6 +272,10 @@ TLB::demapContext(int partition_id, int context_id) for (x = 0; x < size; x++) { if (tlb[x].range.contextId == context_id && tlb[x].range.partitionId == partition_id) { + if (tlb[x].valid == true) { + freeList.push_front(&tlb[x]); + DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]); + } tlb[x].valid = false; if (tlb[x].used) { tlb[x].used = false; @@ -251,6 +294,10 @@ TLB::demapAll(int partition_id) cacheValid = false; for (x = 0; x < size; x++) { if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) { + if (tlb[x].valid == true){ + freeList.push_front(&tlb[x]); + DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]); + } tlb[x].valid = false; if (tlb[x].used) { tlb[x].used = false; @@ -267,7 +314,10 @@ TLB::invalidateAll() int x; cacheValid = false; + freeList.clear(); for (x = 0; x < size; x++) { + if (tlb[x].valid == true) + freeList.push_back(&tlb[x]); tlb[x].valid = false; } usedEntries = 0; @@ -275,17 +325,26 @@ TLB::invalidateAll() uint64_t TLB::TteRead(int entry) { + if (entry >= size) + panic("entry: %d\n", entry); + assert(entry < size); - return tlb[entry].pte(); + if (tlb[entry].valid) + return tlb[entry].pte(); + else + return (uint64_t)-1ll; } uint64_t TLB::TagRead(int entry) { assert(entry < size); 
uint64_t tag; + if (!tlb[entry].valid) + return (uint64_t)-1ll; - tag = tlb[entry].range.contextId | tlb[entry].range.va | - (uint64_t)tlb[entry].range.partitionId << 61; + tag = tlb[entry].range.contextId; + tag |= tlb[entry].range.va; + tag |= (uint64_t)tlb[entry].range.partitionId << 61; tag |= tlb[entry].range.real ? ULL(1) << 60 : 0; tag |= (uint64_t)~tlb[entry].pte._size() << 56; return tag; @@ -501,13 +560,13 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write) // Be fast if we can! if (cacheValid && cacheState == tlbdata) { if (cacheEntry[0] && cacheAsi[0] == asi && cacheEntry[0]->range.va < vaddr + size && - cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) { + cacheEntry[0]->range.va + cacheEntry[0]->range.size > vaddr) { req->setPaddr(cacheEntry[0]->pte.paddr() & ~(cacheEntry[0]->pte.size()-1) | vaddr & cacheEntry[0]->pte.size()-1 ); return NoFault; } if (cacheEntry[1] && cacheAsi[1] == asi && cacheEntry[1]->range.va < vaddr + size && - cacheEntry[1]->range.va + cacheEntry[1]->range.size >= vaddr) { + cacheEntry[1]->range.va + cacheEntry[1]->range.size > vaddr) { req->setPaddr(cacheEntry[1]->pte.paddr() & ~(cacheEntry[1]->pte.size()-1) | vaddr & cacheEntry[1]->pte.size()-1 ); return NoFault; @@ -667,8 +726,12 @@ continueDtbFlow: } // cache translation date for next translation - cacheValid = true; cacheState = tlbdata; + if (!cacheValid) { + cacheEntry[1] = NULL; + cacheEntry[0] = NULL; + } + if (cacheEntry[0] != e && cacheEntry[1] != e) { cacheEntry[1] = cacheEntry[0]; cacheEntry[0] = e; @@ -677,7 +740,7 @@ continueDtbFlow: if (implicit) cacheAsi[0] = (ASI)0; } - + cacheValid = true; req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) | vaddr & e->pte.size()-1); DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr()); @@ -696,7 +759,7 @@ handleQueueRegAccess: writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi); return new PrivilegedAction; } - if (priv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) { + if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) { writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi); return new DataAccessException; } diff --git a/src/arch/sparc/tlb.hh b/src/arch/sparc/tlb.hh index a6e6a8bd3..34e5f5feb 100644 --- a/src/arch/sparc/tlb.hh +++ b/src/arch/sparc/tlb.hh @@ -54,10 +54,13 @@ class TLB : public SimObject int size; int usedEntries; + int lastReplaced; uint64_t cacheState; bool cacheValid; + std::list freeList; + enum FaultTypes { OtherFault = 0, PrivViolation = 0x1, @@ -93,9 +96,6 @@ class TLB : public SimObject /** Given an entry id, read that tlb entries' tag. 
*/ uint64_t TagRead(int entry); - /** Give an entry id, read that tlb entries' tte */ - uint64_t TteRead(int entry); - /** Remove all entries from the TLB */ void invalidateAll(); @@ -128,6 +128,10 @@ class TLB : public SimObject // Checkpointing virtual void serialize(std::ostream &os); virtual void unserialize(Checkpoint *cp, const std::string §ion); + + /** Give an entry id, read that tlb entries' tte */ + uint64_t TteRead(int entry); + }; class ITB : public TLB diff --git a/src/python/m5/objects/SparcTLB.py b/src/python/m5/objects/SparcTLB.py index de732e8de..06d2a8231 100644 --- a/src/python/m5/objects/SparcTLB.py +++ b/src/python/m5/objects/SparcTLB.py @@ -11,4 +11,4 @@ class SparcDTB(SparcTLB): class SparcITB(SparcTLB): type = 'SparcITB' - size = 48 + size = 64 -- cgit v1.2.3 From 81e0ac300036f0de0d8959d2320cbd6a2b3ae285 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Fri, 29 Dec 2006 16:57:45 -0800 Subject: Formatting --HG-- extra : convert_revision : f5a940a8b9aaba0703781b398cf29be581907c21 --- src/python/m5/objects/BaseCPU.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/m5/objects/BaseCPU.py b/src/python/m5/objects/BaseCPU.py index 8037c90af..67a28a61e 100644 --- a/src/python/m5/objects/BaseCPU.py +++ b/src/python/m5/objects/BaseCPU.py @@ -41,7 +41,8 @@ class BaseCPU(SimObject): "terminate when all threads have reached this load count") max_loads_any_thread = Param.Counter(0, "terminate when any thread reaches this load count") - progress_interval = Param.Tick(0, "interval to print out the progress message") + progress_interval = Param.Tick(0, + "interval to print out the progress message") defer_registration = Param.Bool(False, "defer registration with system (for sampling)") -- cgit v1.2.3 From 7d7f3d0e99eca98a5659e73bce56d615f0ed4fc3 Mon Sep 17 00:00:00 2001 From: Kevin Lim Date: Sat, 30 Dec 2006 13:21:25 -0500 Subject: Fix up previous commit to proper logic. src/cpu/o3/commit_impl.hh: Oops, changed the logic a little bit. Fix it up to how it used to be. --HG-- extra : convert_revision : df7f69b0997207b611374c3c92880f3a405e88be --- src/cpu/o3/commit_impl.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh index c3c4983c5..96f094926 100644 --- a/src/cpu/o3/commit_impl.hh +++ b/src/cpu/o3/commit_impl.hh @@ -998,7 +998,7 @@ DefaultCommit::commitHead(DynInstPtr &head_inst, unsigned inst_num) { DPRINTF(Commit, "Waiting for all stores to writeback.\n"); return false; - } else if (inst_num > 0) { + } else if (inst_num > 0 || iewStage->hasStoresToWB()) { DPRINTF(Commit, "Waiting to become head of commit.\n"); return false; } -- cgit v1.2.3
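As a usage sketch for the m5.internal.event interface introduced earlier in this series: create() takes any Python object that provides a process() method plus a Tick, wraps it in a one-shot, self-deleting PythonEvent on mainEventQueue, and calls process() back in Python when that tick is reached. A minimal, illustrative example (the class name and tick value are made up; assumes an m5 binary built with the event module):

    import m5.internal.event

    class PrintEvent(object):
        def process(self):
            # invoked from the C++ event queue at the scheduled tick
            print "event fired"

    # schedule the callback at an arbitrary tick of 1000
    m5.internal.event.create(PrintEvent(), 1000)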