author | Ron Dreslinski <rdreslin@umich.edu> | 2004-11-12 14:40:07 -0500
---|---|---
committer | Ron Dreslinski <rdreslin@umich.edu> | 2004-11-12 14:40:07 -0500
commit | 501db90f2ce7c083294e7981d6ef78b1420d671a (patch) |
tree | d817a12fc7a56902ea07637433b0c6e416a1d164 /cpu/memtest |
parent | e2de2ea192b23b32b5ca7578cc9b6660ef3e0404 (diff) |
download | gem5-501db90f2ce7c083294e7981d6ef78b1420d671a.tar.xz |
Make changes so that coherence works on a timing bus for the top level of the cache hierarchy.
This adds a snoopResponse callback to the caches and a NACK mechanism for requests (a toy retry sketch follows the commit message).
cpu/memtest/memtest.cc:
Modified to work with do_events:
No multiple requests to the same block may be outstanding at the same time from the same tester
Using false sharing, each tester does only 1-byte accesses, with its id as the block offset (sketched after the diffstat below)
Allow more cycles before signaling deadlock; with do_events it can take time to complete a request (NACKs/blocked bus)
cpu/memtest/memtest.hh:
Updated to keep an id with each tester (used for address generation)
Updated to keep a set of outstanding addresses to prevent multiple outstanding requests per address per tester (sketched after the diff below)
//Should really look into doing store forwarding within the tester; then we could test more functionality
--HG--
extra : convert_revision : 05fbcf547e4ffab9d220aeb73126ed787ca82239
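
For readers unfamiliar with how a NACK interacts with a timing bus, here is a minimal, self-contained sketch of the general retry idea described above. The `Bus`, `Request`, `sendTiming`, `issue`, and `retry` names are assumptions made for this illustration only; they are not the simulator's actual snoop/NACK interface.

```cpp
#include <deque>
#include <iostream>

struct Request { int addr; };

struct Bus {
    bool blocked = false;

    // Returns false (a "NACK") when the bus cannot accept the request right
    // now; the requester is expected to hold the request and retry later.
    bool sendTiming(const Request &req) {
        if (blocked)
            return false;                      // NACK: caller must retry
        std::cout << "granted request for addr 0x"
                  << std::hex << req.addr << "\n";
        return true;
    }
};

struct Requester {
    std::deque<Request> retryQueue;            // requests that were NACKed

    void issue(Bus &bus, const Request &req) {
        if (!bus.sendTiming(req))
            retryQueue.push_back(req);         // queue it for a later tick
    }

    void retry(Bus &bus) {
        while (!retryQueue.empty() && bus.sendTiming(retryQueue.front()))
            retryQueue.pop_front();
    }
};

int main() {
    Bus bus;
    Requester cpu;

    bus.blocked = true;
    cpu.issue(bus, Request{0x40});             // NACKed and queued
    bus.blocked = false;
    cpu.retry(bus);                            // succeeds on retry
    return 0;
}
```

Because a NACKed request can bounce several times before being accepted, the tester's deadlock threshold is raised in this patch (5000 to 500000 non-responding cycles).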
Diffstat (limited to 'cpu/memtest')
-rw-r--r-- | cpu/memtest/memtest.cc | 49
-rw-r--r-- | cpu/memtest/memtest.hh | 15
2 files changed, 56 insertions, 8 deletions
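
The memtest.cc hunks below implement the false-sharing scheme from the commit message: when no copies are generated, the low six bits of the random offset are cleared and the tester's id is added, so every tester contends for the same 64-byte block while touching a distinct byte. A minimal standalone sketch of that address computation; the offset value and tester count are arbitrary choices for illustration:

```cpp
#include <iostream>

int main() {
    const unsigned blockMask = ~63u;   // 64-byte blocks, as in the patch
    unsigned offset1 = 0x1ab7;         // an arbitrary "random" offset

    for (unsigned id = 0; id < 4; ++id) {            // four hypothetical testers
        // mirrors: offset1 &= ~63; offset1 += id;
        unsigned addr = (offset1 & blockMask) + id;
        // the patch also forces access_size = 0, i.e. 1-byte accesses
        std::cout << "tester " << id
                  << " -> block 0x" << std::hex << (addr & blockMask)
                  << ", byte " << std::dec << (addr & 63) << "\n";
    }
    return 0;
}
```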
diff --git a/cpu/memtest/memtest.cc b/cpu/memtest/memtest.cc
index 6584a62ba..e967c79da 100644
--- a/cpu/memtest/memtest.cc
+++ b/cpu/memtest/memtest.cc
@@ -28,9 +28,10 @@
 // FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded
 
-#include <string>
-#include <sstream>
 #include <iomanip>
+#include <set>
+#include <sstream>
+#include <string>
 #include <vector>
 
 #include "base/misc.hh"
@@ -44,6 +45,8 @@
 
 using namespace std;
 
+int TESTER_ALLOCATOR=0;
+
 MemTest::MemTest(const string &name,
                  MemInterface *_cache_interface,
                  FunctionalMemory *main_mem,
@@ -111,6 +114,8 @@ MemTest::MemTest(const string &name,
     noResponseCycles = 0;
     numReads = 0;
     tickEvent.schedule(0);
+
+    id = TESTER_ALLOCATOR++;
 }
 
 static void
@@ -127,6 +132,11 @@ printData(ostream &os, uint8_t *data, int nbytes)
 void
 MemTest::completeRequest(MemReqPtr &req, uint8_t *data)
 {
+    //Remove the address from the list of outstanding
+    std::set<unsigned>::iterator removeAddr = outstandingAddrs.find(req->paddr);
+    assert(removeAddr != outstandingAddrs.end());
+    outstandingAddrs.erase(removeAddr);
+
     switch (req->cmd) {
       case Read:
         if (memcmp(req->data, data, req->size) != 0) {
@@ -158,6 +168,10 @@ MemTest::completeRequest(MemReqPtr &req, uint8_t *data)
         break;
 
       case Copy:
+        //Also remove dest from outstanding list
+        removeAddr = outstandingAddrs.find(req->dest);
+        assert(removeAddr != outstandingAddrs.end());
+        outstandingAddrs.erase(removeAddr);
         numCopiesStat++;
         break;
 
@@ -212,7 +226,7 @@ MemTest::tick()
     if (!tickEvent.scheduled())
         tickEvent.schedule(curTick + 1);
 
-    if (++noResponseCycles >= 5000) {
+    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }
@@ -232,6 +246,16 @@ MemTest::tick()
     unsigned source_align = rand() % 100;
     unsigned dest_align = rand() % 100;
 
+    //If we aren't doing copies, use id as offset, and do a false sharing
+    //mem tester
+    if (percentCopies == 0) {
+        //We can eliminate the lower bits of the offset, and then use the id
+        //to offset within the blks
+        offset1 &= ~63; //Not the low order bits
+        offset1 += id;
+        access_size = 0;
+    }
+
     MemReqPtr req = new MemReq();
 
     if (cacheable < percentUncacheable) {
@@ -251,6 +275,13 @@ MemTest::tick()
 
     if (cmd < percentReads) {
         // read
+
+        //For now we only allow one outstanding request per addreess per tester
+        //This means we assume CPU does write forwarding to reads that alias something
+        //in the cpu store buffer.
+        if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return;
+        else outstandingAddrs.insert(req->paddr);
+
         req->cmd = Read;
         uint8_t *result = new uint8_t[8];
         checkMem->access(Read, req->paddr, result, req->size);
@@ -273,6 +304,13 @@ MemTest::tick()
         }
     } else if (cmd < (100 - percentCopies)){
         // write
+
+        //For now we only allow one outstanding request per addreess per tester
+        //This means we assume CPU does write forwarding to reads that alias something
+        //in the cpu store buffer.
+        if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return;
+        else outstandingAddrs.insert(req->paddr);
+
         req->cmd = Write;
         memcpy(req->data, &data, req->size);
         checkMem->access(Write, req->paddr, req->data, req->size);
@@ -298,6 +336,11 @@ MemTest::tick()
         // copy
         Addr source = ((base) ? baseAddr1 : baseAddr2) + offset1;
         Addr dest = ((base) ? baseAddr2 : baseAddr1) + offset2;
+        if (outstandingAddrs.find(source) != outstandingAddrs.end()) return;
+        else outstandingAddrs.insert(source);
+        if (outstandingAddrs.find(dest) != outstandingAddrs.end()) return;
+        else outstandingAddrs.insert(dest);
+
         if (source_align >= percentSourceUnaligned) {
             source = blockAddr(source);
         }
diff --git a/cpu/memtest/memtest.hh b/cpu/memtest/memtest.hh
index 72e0709d9..43b17a713 100644
--- a/cpu/memtest/memtest.hh
+++ b/cpu/memtest/memtest.hh
@@ -29,13 +29,14 @@
 #ifndef __MEMTEST_HH__
 #define __MEMTEST_HH__
 
-#include "sim/sim_object.hh"
-#include "mem/mem_interface.hh"
-#include "mem/functional_mem/functional_memory.hh"
-#include "cpu/base_cpu.hh"
-#include "cpu/exec_context.hh"
+#include <set>
 
 #include "base/statistics.hh"
+#include "cpu/base_cpu.hh"
+#include "cpu/exec_context.hh"
+#include "mem/functional_mem/functional_memory.hh"
+#include "mem/mem_interface.hh"
+#include "sim/sim_object.hh"
 #include "sim/stats.hh"
 
 class MemTest : public BaseCPU
@@ -87,6 +88,10 @@ class MemTest : public BaseCPU
     unsigned percentCopies; // target percentage of copy accesses
     unsigned percentUncacheable;
 
+    int id;
+
+    std::set<unsigned> outstandingAddrs;
+
     unsigned blockSize;
 
     Addr blockAddrMask;
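
To make the bookkeeping added in these hunks easier to follow in isolation, here is a standalone sketch of the outstanding-address discipline: an access is issued only if its address is not already in flight, and the address is erased when the matching response completes. The `OutstandingTracker` class and its method names are inventions for this illustration; the real logic lives inline in `MemTest::tick()` and `MemTest::completeRequest()` as shown above.

```cpp
#include <cassert>
#include <cstdint>
#include <set>

class OutstandingTracker {
    std::set<uint32_t> outstandingAddrs;   // addresses with a request in flight
  public:
    // Returns false if a request to this address is already outstanding,
    // mirroring the early 'return' added to MemTest::tick().
    bool tryIssue(uint32_t paddr) {
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end())
            return false;
        outstandingAddrs.insert(paddr);
        return true;
    }

    // Called from the completion path, like MemTest::completeRequest().
    void complete(uint32_t paddr) {
        auto it = outstandingAddrs.find(paddr);
        assert(it != outstandingAddrs.end());
        outstandingAddrs.erase(it);
    }
};

int main() {
    OutstandingTracker t;
    assert(t.tryIssue(0x100));    // first access to the address proceeds
    assert(!t.tryIssue(0x100));   // a second one is held back
    t.complete(0x100);
    assert(t.tryIssue(0x100));    // allowed again after the response
    return 0;
}
```

For copies, the patch applies the same check to both the source and the destination address.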