Diffstat (limited to 'src/cpu/memtest/memtest.cc')
-rw-r--r--  src/cpu/memtest/memtest.cc  15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/src/cpu/memtest/memtest.cc b/src/cpu/memtest/memtest.cc
index 42889163a..3c57f85b7 100644
--- a/src/cpu/memtest/memtest.cc
+++ b/src/cpu/memtest/memtest.cc
@@ -152,7 +152,7 @@ MemTest::MemTest(const Params *p)
     // set up counters
     noResponseCycles = 0;
     numReads = 0;
-    tickEvent.schedule(0);
+    schedule(tickEvent, 0);
 
     id = TESTER_ALLOCATOR++;
 
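The hunk above moves event scheduling off the Event object and onto the owning
EventManager: schedule(tickEvent, 0) replaces tickEvent.schedule(0), so every
event-queue insertion goes through the manager. Below is a minimal standalone
sketch of that idiom, not gem5 source; only the names Event, EventManager,
Tick, schedule(), and scheduled() come from the diff, and all the bodies are
assumptions invented for illustration.

// sched_sketch.cc -- standalone mock, NOT gem5 code. Shows the call style
// the hunk above adopts: the owner asks its EventManager to schedule an
// event rather than the event scheduling itself.
#include <cstdint>
#include <iostream>
#include <map>

typedef uint64_t Tick;

struct Event {
    bool isSched = false;                  // assumed bookkeeping, not gem5's
    virtual ~Event() {}
    virtual void process() = 0;
    bool scheduled() const { return isSched; }
};

class EventManager {
    std::multimap<Tick, Event *> eventq;   // time-ordered queue (assumed)
  public:
    void schedule(Event &ev, Tick when) {  // the new call style from the diff
        ev.isSched = true;
        eventq.insert(std::make_pair(when, &ev));
    }
    bool runNext() {                       // pop and fire the earliest event
        if (eventq.empty())
            return false;
        Event *ev = eventq.begin()->second;
        eventq.erase(eventq.begin());
        ev->isSched = false;
        ev->process();
        return true;
    }
};

struct TickEvent : Event {
    void process() { std::cout << "tick fired\n"; }
};

int main()
{
    EventManager mgr;
    TickEvent tickEvent;
    if (!tickEvent.scheduled())
        mgr.schedule(tickEvent, 0);        // mirrors the diff's new idiom
    while (mgr.runNext()) { }
    return 0;
}

One likely motivation for the change: the scheduling object then only needs
its manager's interface, not direct access to a global event queue.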
@@ -262,7 +262,7 @@ void
 MemTest::tick()
 {
     if (!tickEvent.scheduled())
-        tickEvent.schedule(curTick + ticks(1));
+        schedule(tickEvent, curTick + ticks(1));
 
     if (++noResponseCycles >= 500000) {
         cerr << name() << ": deadlocked at cycle " << curTick << endl;
@@ -279,7 +279,7 @@ MemTest::tick()
     unsigned base = random() % 2;
     uint64_t data = random();
     unsigned access_size = random() % 4;
-    unsigned cacheable = random() % 100;
+    bool uncacheable = (random() % 100) < percentUncacheable;
 
     //If we aren't doing copies, use id as offset, and do a false sharing
     //mem tester
@@ -290,17 +290,16 @@ MemTest::tick()
     access_size = 0;
 
     Request *req = new Request();
-    uint32_t flags = 0;
+    Request::Flags flags;
     Addr paddr;
 
-    if (cacheable < percentUncacheable) {
-        flags |= UNCACHEABLE;
+    if (uncacheable) {
+        flags.set(Request::UNCACHEABLE);
         paddr = uncacheAddr + offset;
     } else {
         paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
     }
-    bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
-    //bool probe = false;
+    bool probe = (random() % 100 < percentFunctional) && !uncacheable;
 
     paddr &= ~((1 << access_size) - 1);
     req->setPhys(paddr, 1 << access_size, flags);
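The last two hunks replace the raw uint32_t flag word with a typed
Request::Flags object (flags.set(Request::UNCACHEABLE) instead of
flags |= UNCACHEABLE) and hoist the percentUncacheable comparison into a
named bool, which the probe computation then reuses instead of re-reading
the flag bits. Below is a standalone sketch of that flags idiom with a
hypothetical Flags class, not the real Request API; only the names Flags,
set(), UNCACHEABLE, percentUncacheable, and percentFunctional echo the diff,
and the bit value and config numbers are invented.

// flags_sketch.cc -- hypothetical stand-in, NOT gem5's Request API.
#include <cstdint>
#include <cstdlib>
#include <iostream>

class Flags {
    uint32_t bits;
  public:
    Flags() : bits(0) {}                        // default-constructs clear,
                                                // so no "= 0" at the call site
    void set(uint32_t mask)         { bits |= mask; }
    void clear(uint32_t mask)       { bits &= ~mask; }
    bool isSet(uint32_t mask) const { return (bits & mask) != 0; }
};

static const uint32_t UNCACHEABLE = 1 << 1;     // bit value chosen arbitrarily

int main()
{
    const int percentUncacheable = 10;          // assumed config knob
    const int percentFunctional = 10;           // assumed config knob

    // Draw the biased coin once and name the result, as the diff now does,
    // instead of keeping the raw 0-99 draw and comparing it at each use.
    bool uncacheable = (std::rand() % 100) < percentUncacheable;

    Flags flags;                                // was: uint32_t flags = 0;
    if (uncacheable)
        flags.set(UNCACHEABLE);                 // was: flags |= UNCACHEABLE;

    // The probe decision tests the bool directly rather than re-reading
    // the flag word, matching the rewritten probe line in the diff.
    bool probe = (std::rand() % 100 < percentFunctional) && !uncacheable;

    std::cout << "uncacheable=" << uncacheable
              << " probe=" << probe
              << " flag set=" << flags.isSet(UNCACHEABLE) << "\n";
    return 0;
}

A typed flags object keeps flag manipulation behind named methods, so a stray
arithmetic or comparison on the raw word (as in the old probe line) no longer
type-checks.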