author     Erik Hallnor <ehallnor@umich.edu>  2004-09-30 01:46:48 -0400
committer  Erik Hallnor <ehallnor@umich.edu>  2004-09-30 01:46:48 -0400
commit     f2ac7b645f9c352bfb2697e2893beef62af0d08a (patch)
tree       08b57dc74964f01c308def466c3fd1aab77b82a7
parent     34742515f53fa807faa1809ef3cc7d319b9a244e (diff)
Updates to make traces work correctly in all circumstances. Add set-associative OPT cache simulation.
cpu/trace/opt_cpu.cc:
cpu/trace/opt_cpu.hh:
Add the ability to simulate OPT caches that are less than fully associative.
cpu/trace/reader/itx_reader.cc:
Add writeback to the command list.
--HG--
extra : convert_revision : a9c9c4be3358f4083d7e85772620441a3ad809db
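In short, the OPT CPU now runs Belady's furthest-next-use replacement independently within each cache set instead of over one fully-associative block pool: each block address is mapped to a set with a mask, each set's references are annotated (via a backwards pass) with the time of their next use, and on a miss the resident block whose next use lies furthest in the future is evicted. Below is a minimal, self-contained sketch of that idea; it is illustrative only (OptSim, run, and processSet are hypothetical names, not gem5's classes), and it assumes a power-of-two number of sets, as the patch itself does.

    // Illustrative sketch of per-set Belady/OPT simulation (hypothetical code,
    // not gem5's): bucket block addresses by set, precompute next-use times,
    // then evict the resident block with the furthest next use on each miss.
    #include <cstdint>
    #include <cstdio>
    #include <queue>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    typedef uint64_t Addr;

    struct OptSim
    {
        int assoc, numSets, setMask, logBlkSize;
        long hits = 0, misses = 0;

        OptSim(int block_size, int cache_size, int _assoc)
            : assoc(_assoc)
        {
            int numBlks = cache_size / block_size;
            numSets = numBlks / assoc;      // assumed to be a power of two
            setMask = numSets - 1;
            logBlkSize = 0;
            while ((1 << logBlkSize) < block_size)
                ++logBlkSize;
        }

        void run(const std::vector<Addr> &trace)
        {
            // Bucket block addresses by set, as the patch does with refInfo[set].
            std::vector<std::vector<Addr> > refs(numSets);
            for (size_t i = 0; i < trace.size(); ++i) {
                Addr blk = trace[i] >> logBlkSize;
                refs[blk & setMask].push_back(blk);
            }
            for (int s = 0; s < numSets; ++s)
                processSet(refs[s]);
            printf("OPT Misses: %ld\nOPT Hits: %ld\n", misses, hits);
        }

        void processSet(const std::vector<Addr> &refs)
        {
            const long INF = 1L << 60;
            long n = (long)refs.size();

            // nextUse[i] = index of the next reference to the same block, or
            // INF; this mirrors the backwards nextRefTime annotation pass.
            std::vector<long> nextUse(n);
            std::unordered_map<Addr, long> seen;
            for (long i = n - 1; i >= 0; --i) {
                auto it = seen.find(refs[i]);
                nextUse[i] = (it == seen.end()) ? INF : it->second;
                seen[refs[i]] = i;
            }

            // Resident blocks keyed by next-use time; the max-heap plays the
            // role of the patch's cacheHeap/heapify machinery.
            std::priority_queue<std::pair<long, Addr> > heap;
            std::unordered_map<Addr, long> resident; // block -> current next use
            for (long i = 0; i < n; ++i) {
                if (resident.count(refs[i])) {
                    ++hits;
                } else {
                    ++misses;
                    if ((int)resident.size() == assoc) {
                        // Evict the block whose next reference is furthest
                        // away, skipping entries made stale by later hits.
                        while (true) {
                            std::pair<long, Addr> top = heap.top();
                            heap.pop();
                            auto r = resident.find(top.second);
                            if (r != resident.end() && r->second == top.first) {
                                resident.erase(r);
                                break;
                            }
                        }
                    }
                }
                resident[refs[i]] = nextUse[i];
                heap.push(std::make_pair(nextUse[i], refs[i]));
            }
        }
    };

The actual patch takes a slightly different route to the same result: it keeps an explicit array-based max-heap per set (heapify/processRankIncrease) plus a lookup table from block address to heap position, rather than the lazy-deletion priority queue used in this sketch.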
-rw-r--r--  cpu/trace/opt_cpu.cc            | 108
-rw-r--r--  cpu/trace/opt_cpu.hh            |  55
-rw-r--r--  cpu/trace/reader/itx_reader.cc  |   3
3 files changed, 97 insertions, 69 deletions
diff --git a/cpu/trace/opt_cpu.cc b/cpu/trace/opt_cpu.cc
index c23b72856..22d12f079 100644
--- a/cpu/trace/opt_cpu.cc
+++ b/cpu/trace/opt_cpu.cc
@@ -44,34 +44,37 @@ using namespace std;
 
 OptCPU::OptCPU(const string &name,
-               MemTraceReader *_trace,
-               int log_block_size,
-               int cache_size)
+               MemTraceReader *_trace,
+               int block_size,
+               int cache_size,
+               int _assoc)
     : BaseCPU(name,1), tickEvent(this), trace(_trace),
-      numBlks(cache_size/(1<<log_block_size))
+      numBlks(cache_size/block_size), assoc(_assoc), numSets(numBlks/assoc),
+      setMask(numSets - 1)
 {
+    int log_block_size = (int)(log((double) block_size)/log(2.0));
     MemReqPtr req;
     trace->getNextReq(req);
-    assert(log_block_size >= 4);
-    assert(refInfo.size() == 0);
-    while (req && (refInfo.size() < 60000000)) {
+    refInfo.resize(numSets);
+    while (req) {
         RefInfo temp;
         temp.addr = req->paddr >> log_block_size;
-        refInfo.push_back(temp);
+        int set = temp.addr & setMask;
+        refInfo[set].push_back(temp);
         trace->getNextReq(req);
     }
-    // Can't handle more references than "infinity"
-    assert(refInfo.size() < InfiniteRef);
 
     // Initialize top level of lookup table.
     lookupTable.resize(16);
 
     // Annotate references with next ref time.
-    for (RefIndex i = refInfo.size() - 1; i >= 0; --i) {
-        Addr addr = refInfo[i].addr;
-        initTable(addr, InfiniteRef);
-        refInfo[i].nextRefTime = lookupValue(addr);
-        setValue(addr, i);
+    for (int k = 0; k < numSets; ++k) {
+        for (RefIndex i = refInfo[k].size() - 1; i >= 0; --i) {
+            Addr addr = refInfo[k][i].addr;
+            initTable(addr, InfiniteRef);
+            refInfo[k][i].nextRefTime = lookupValue(addr);
+            setValue(addr, i);
+        }
     }
 
     // Reset the lookup table
@@ -87,9 +90,6 @@ OptCPU::OptCPU(const string &name,
         }
     }
 
-
-    cacheHeap.resize(numBlks);
-
     tickEvent.schedule(0);
 
     hits = 0;
@@ -97,57 +97,72 @@ OptCPU::OptCPU(const string &name,
 }
 
 void
-OptCPU::tick()
+OptCPU::processSet(int set)
 {
-    // Do opt simulation
-
     // Initialize cache
     int blks_in_cache = 0;
     RefIndex i = 0;
+    cacheHeap.clear();
+    cacheHeap.resize(assoc);
 
-    while (blks_in_cache < numBlks) {
-        RefIndex cache_index = lookupValue(refInfo[i].addr);
+    while (blks_in_cache < assoc) {
+        RefIndex cache_index = lookupValue(refInfo[set][i].addr);
         if (cache_index == -1) {
             // First reference to this block
             misses++;
             cache_index = blks_in_cache++;
-            setValue(refInfo[i].addr, cache_index);
+            setValue(refInfo[set][i].addr, cache_index);
         } else {
             hits++;
         }
         // update cache heap to most recent reference
         cacheHeap[cache_index] = i;
-        if (++i >= refInfo.size()) {
-            // exit
+        if (++i >= refInfo[set].size()) {
+            return;
         }
     }
 
-    for (int start = numBlks/2; start >= 0; --start) {
-        heapify(start);
+    for (int start = assoc/2; start >= 0; --start) {
+        heapify(set,start);
     }
-    //verifyHeap(0);
+    verifyHeap(set,0);
 
-    for (; i < refInfo.size(); ++i) {
-        RefIndex cache_index = lookupValue(refInfo[i].addr);
+    for (; i < refInfo[set].size(); ++i) {
+        RefIndex cache_index = lookupValue(refInfo[set][i].addr);
        if (cache_index == -1) {
             // miss
             misses++;
             // replace from cacheHeap[0]
             // mark replaced block as absent
-            setValue(refInfo[cacheHeap[0]].addr, -1);
+            setValue(refInfo[set][cacheHeap[0]].addr, -1);
             cacheHeap[0] = i;
-            heapify(0);
+            heapify(set, 0);
         } else {
             // hit
             hits++;
-            assert(refInfo[cacheHeap[cache_index]].addr == refInfo[i].addr);
-            assert(refInfo[cacheHeap[cache_index]].nextRefTime == i);
-            assert(heapLeft(cache_index) >= numBlks);
+            assert(refInfo[set][cacheHeap[cache_index]].addr ==
+                   refInfo[set][i].addr);
+            assert(refInfo[set][cacheHeap[cache_index]].nextRefTime == i);
+            assert(heapLeft(cache_index) >= assoc);
         }
         cacheHeap[cache_index] = i;
-        processRankIncrease(cache_index);
+        processRankIncrease(set, cache_index);
+    }
+}
+
+void
+OptCPU::tick()
+{
+    // Do opt simulation
+
+    int references = 0;
+    for (int set = 0; set < numSets; ++set) {
+        if (!refInfo[set].empty()) {
+            processSet(set);
+        }
+        references += refInfo[set].size();
     }
     // exit;
-    fprintf(stderr, "%d, %d, %d\n", misses, hits, refInfo.size());
+    fprintf(stderr, "OPT Misses: %d\nOPT Hits: %d\nOPT Accesses: %d\n",
+            misses, hits, references);
 
     new SimExitEvent("Finshed Memory Trace");
 }
@@ -185,26 +200,29 @@ OptCPU::TickEvent::description()
 
 BEGIN_DECLARE_SIM_OBJECT_PARAMS(OptCPU)
 
-    SimObjectParam<MemTraceReader *> trace;
+    SimObjectParam<MemTraceReader *> data_trace;
     Param<int> size;
-    Param<int> log_block_size;
+    Param<int> block_size;
+    Param<int> assoc;
 
 END_DECLARE_SIM_OBJECT_PARAMS(OptCPU)
 
 BEGIN_INIT_SIM_OBJECT_PARAMS(OptCPU)
 
-    INIT_PARAM_DFLT(trace, "instruction cache", NULL),
+    INIT_PARAM_DFLT(data_trace, "memory trace", NULL),
     INIT_PARAM(size, "cache size"),
-    INIT_PARAM(log_block_size, "log base 2 of block size")
+    INIT_PARAM(block_size, "block size"),
+    INIT_PARAM(assoc,"associativity")
 
 END_INIT_SIM_OBJECT_PARAMS(OptCPU)
 
 CREATE_SIM_OBJECT(OptCPU)
 {
     return new OptCPU(getInstanceName(),
-                      trace,
-                      log_block_size,
-                      size);
+                      data_trace,
+                      block_size,
+                      size,
+                      assoc);
 }
 
 REGISTER_SIM_OBJECT("OptCPU", OptCPU)
diff --git a/cpu/trace/opt_cpu.hh b/cpu/trace/opt_cpu.hh
index e366c2068..847147b3c 100644
--- a/cpu/trace/opt_cpu.hh
+++ b/cpu/trace/opt_cpu.hh
@@ -90,8 +90,8 @@ class OptCPU : public BaseCPU
         Addr addr;
     };
 
-    /** Reference Information. */
-    std::vector<RefInfo> refInfo;
+    /** Reference Information, per set. */
+    std::vector<std::vector<RefInfo> > refInfo;
 
     /** Lookup table to track blocks in the cache heap */
     L1Table lookupTable;
@@ -125,63 +125,65 @@ class OptCPU : public BaseCPU
      */
     void initTable(Addr addr, RefIndex index);
 
-    void heapSwap(int a, int b) {
+    void heapSwap(int set, int a, int b) {
         RefIndex tmp = cacheHeap[a];
         cacheHeap[a] = cacheHeap[b];
         cacheHeap[b] = tmp;
 
-        setValue(refInfo[cacheHeap[a]].addr, a);
-        setValue(refInfo[cacheHeap[b]].addr, b);
+        setValue(refInfo[set][cacheHeap[a]].addr, a);
+        setValue(refInfo[set][cacheHeap[b]].addr, b);
     }
 
     int heapLeft(int index) { return index + index + 1; }
 
     int heapRight(int index) { return index + index + 2; }
 
     int heapParent(int index) { return (index - 1) >> 1; }
 
-    RefIndex heapRank(int index) {
-        return refInfo[cacheHeap[index]].nextRefTime;
+    RefIndex heapRank(int set, int index) {
+        return refInfo[set][cacheHeap[index]].nextRefTime;
     }
 
-    void heapify(int start){
+    void heapify(int set, int start){
         int left = heapLeft(start);
         int right = heapRight(start);
         int max = start;
-        if (left < numBlks && heapRank(left) > heapRank(start)) {
+        if (left < assoc && heapRank(set, left) > heapRank(set, start)) {
            max = left;
         }
-        if (right < numBlks && heapRank(right) > heapRank(max)) {
+        if (right < assoc && heapRank(set, right) > heapRank(set, max)) {
            max = right;
         }
 
         if (max != start) {
-            heapSwap(start, max);
-            heapify(max);
+            heapSwap(set, start, max);
+            heapify(set, max);
         }
     }
 
-    void verifyHeap(int start) {
+    void verifyHeap(int set, int start) {
        int left = heapLeft(start);
        int right = heapRight(start);
 
-        if (left < numBlks) {
-            assert(heapRank(start) >= heapRank(left));
-            verifyHeap(left);
+        if (left < assoc) {
+            assert(heapRank(set, start) >= heapRank(set, left));
+            verifyHeap(set, left);
         }
-        if (right < numBlks) {
-            assert(heapRank(start) >= heapRank(right));
-            verifyHeap(right);
+        if (right < assoc) {
+            assert(heapRank(set, start) >= heapRank(set, right));
+            verifyHeap(set, right);
         }
     }
 
-    void processRankIncrease(int start) {
+    void processRankIncrease(int set, int start) {
         int parent = heapParent(start);
-        while (start > 0 && heapRank(parent) < heapRank(start)) {
-            heapSwap(parent, start);
+        while (start > 0 && heapRank(set,parent) < heapRank(set,start)) {
+            heapSwap(set, parent, start);
             start = parent;
             parent = heapParent(start);
         }
     }
 
+    void processSet(int set);
+
     static const RefIndex InfiniteRef = 0x7fffffff;
 
     /** Memory reference trace. */
@@ -193,6 +195,10 @@ class OptCPU : public BaseCPU
     /** The number of blocks in the cache. */
     const int numBlks;
 
+    const int assoc;
+    const int numSets;
+    const int setMask;
+
     int misses;
     int hits;
@@ -203,8 +209,9 @@ class OptCPU : public BaseCPU
      */
     OptCPU(const std::string &name,
            MemTraceReader *_trace,
-           int log_block_size,
-           int cache_size);
+           int block_size,
+           int cache_size,
+           int assoc);
 
     /**
      * Perform the optimal replacement simulation.
diff --git a/cpu/trace/reader/itx_reader.cc b/cpu/trace/reader/itx_reader.cc
index 56f06c870..006fcc9dd 100644
--- a/cpu/trace/reader/itx_reader.cc
+++ b/cpu/trace/reader/itx_reader.cc
@@ -161,6 +161,9 @@ ITXReader::getNextReq(MemReqPtr &req)
             case ITXWrite:
                 tmp_req->cmd = Write;
                 break;
+            case ITXWriteback:
+                tmp_req->cmd = Writeback;
+                break;
             case ITXCode:
                 tmp_req->cmd = Read;
                 tmp_req->flags |= INST_READ;
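For a feel of the new parameters, the derived cache geometry follows directly from the constructor's initializer list (numBlks = cache_size/block_size, numSets = numBlks/assoc, setMask = numSets - 1). The numbers below are a hypothetical example, not values from the commit:

    #include <cstdio>

    int main()
    {
        // Hypothetical configuration: 32 KB cache, 32-byte blocks, 4-way.
        int cache_size = 32 * 1024;
        int block_size = 32;
        int assoc = 4;

        int numBlks = cache_size / block_size;  // 1024 blocks
        int numSets = numBlks / assoc;          // 256 sets
        int setMask = numSets - 1;              // 0xff

        printf("blocks=%d sets=%d setMask=0x%x\n", numBlks, numSets, setMask);
        return 0;
    }

Because the set index is computed as addr & setMask, the configuration should keep numSets a power of two; otherwise the mask would not select sets correctly.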