author     Andreas Hansson <andreas.hansson@arm.com>  2014-09-03 07:42:54 -0400
committer  Andreas Hansson <andreas.hansson@arm.com>  2014-09-03 07:42:54 -0400
commit     2698e739660516af442c0f913eb0e91a00e7b7db (patch)
tree       331dfa865e3b36d5187353fe3db57f93c73eb0e0 /src/cpu/testers/memtest
parent     1ff4c45bbbaa22d5bd91e9bdd34d4435290ab8be (diff)
download   gem5-2698e739660516af442c0f913eb0e91a00e7b7db.tar.xz
base: Use the global Mersenne twister throughout
This patch tidies up random number generation to ensure that it is done consistently throughout the code base. In essence this involves a clean-up of Ruby, and some code simplifications in the traffic generator. As part of this patch a bunch of skewed distributions (off-by-one etc.) have been fixed.

Note that a single global random number generator is used, and that the object instantiation order will impact the behaviour (the global sequence of numbers is unaffected, but if module A calls random before module B, they will obviously see different outcomes than in the opposite order). The dependency on the instantiation order is true in any case due to the execution model of gem5, so we leave it as is. Also note that the global random generator is not thread safe at this point.

Regressions using the memtest, TrafficGen or any Ruby tester are affected and will be updated accordingly.
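For illustration, a minimal, self-contained sketch of the inclusive-range API style this patch adopts. This mirrors how random_mt is called in the diff below, but it uses std::mt19937_64 directly rather than gem5's actual base/random.hh implementation, so the class body here is an assumption, not the real gem5 code:

    #include <cstdint>
    #include <random>

    // Hypothetical stand-in for gem5's Random wrapper (base/random.hh).
    // The key difference from the old random() % n idiom is that
    // random(min, max) draws from an inclusive [min, max] range with no
    // modulo bias.
    class Random
    {
      public:
        // Uniform draw over the distribution's default range,
        // i.e. [0, std::numeric_limits<T>::max()].
        template <typename T>
        T random()
        {
            std::uniform_int_distribution<T> dist;
            return dist(gen);
        }

        // Uniform draw over [min, max], inclusive on both ends.
        template <typename T>
        T random(T min, T max)
        {
            std::uniform_int_distribution<T> dist(min, max);
            return dist(gen);
        }

      private:
        // One Mersenne twister engine; like the global random_mt in this
        // patch, it is not thread safe.
        std::mt19937_64 gen;
    };

    // Single global generator, mirroring random_mt.
    Random random_mt;

    int main()
    {
        // Percentage-style draw: 100 equally likely values, 0..99 inclusive.
        unsigned cmd = random_mt.random<unsigned>(0, 99);
        // Full 64-bit range when no bounds are given.
        uint64_t data = random_mt.random<uint64_t>();
        (void)cmd;
        (void)data;
        return 0;
    }

The explicit inclusive [min, max] range is what removes the modulo bias and the off-by-one skew mentioned above.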
Diffstat (limited to 'src/cpu/testers/memtest')
-rw-r--r--  src/cpu/testers/memtest/memtest.cc  18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 7f3ff0d03..6dc2ccb73 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -37,6 +37,7 @@
 #include <vector>

 #include "base/misc.hh"
+#include "base/random.hh"
 #include "base/statistics.hh"
 #include "cpu/testers/memtest/memtest.hh"
 #include "debug/MemTest.hh"
@@ -261,14 +262,14 @@ MemTest::tick()
     }

     //make new request
-    unsigned cmd = random() % 100;
-    unsigned offset = random() % size;
-    unsigned base = random() % 2;
-    uint64_t data = random();
-    unsigned access_size = random() % 4;
-    bool uncacheable = (random() % 100) < percentUncacheable;
+    unsigned cmd = random_mt.random(0, 100);
+    unsigned offset = random_mt.random<unsigned>(0, size - 1);
+    unsigned base = random_mt.random(0, 1);
+    uint64_t data = random_mt.random<uint64_t>();
+    unsigned access_size = random_mt.random(0, 3);
+    bool uncacheable = random_mt.random(0, 100) < percentUncacheable;

-    unsigned dma_access_size = random() % 4;
+    unsigned dma_access_size = random_mt.random(0, 3);

     //If we aren't doing copies, use id as offset, and do a false sharing
     //mem tester
@@ -296,7 +297,8 @@ MemTest::tick()
         return;
     }

-    bool do_functional = (random() % 100 < percentFunctional) && !uncacheable;
+    bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
+        !uncacheable;

     Request *req = new Request();
     uint8_t *result = new uint8_t[8];