From 2698e739660516af442c0f913eb0e91a00e7b7db Mon Sep 17 00:00:00 2001 From: Andreas Hansson Date: Wed, 3 Sep 2014 07:42:54 -0400 Subject: base: Use the global Mersenne twister throughout This patch tidies up random number generation to ensure that it is done consistently throughout the code base. In essence this involves a clean-up of Ruby, and some code simplifications in the traffic generator. As part of this patch a bunch of skewed distributions (off-by-one etc) have been fixed. Note that a single global random number generator is used, and that the object instantiation order will impact the behaviour (the sequence of numbers will be unaffected, but if module A calls random before module B then they would obviously see a different outcome). The dependency on the instantiation order is true in any case due to the execution-model of gem5, so we leave it as is. Also note that the global random generator is not thread safe at this point. Regressions using the memtest, TrafficGen or any Ruby tester are affected and will be updated accordingly. --- .../testers/directedtest/SeriesRequestGenerator.cc | 3 ++- src/cpu/testers/memtest/memtest.cc | 18 ++++++++++-------- src/cpu/testers/networktest/networktest.cc | 7 ++++--- src/cpu/testers/rubytest/Check.cc | 22 ++++++++++++---------- src/cpu/testers/rubytest/CheckTable.cc | 3 ++- src/cpu/testers/traffic_gen/generators.cc | 15 +++++++-------- src/cpu/testers/traffic_gen/traffic_gen.cc | 2 +- src/mem/ruby/common/NetDest.cc | 7 ------- src/mem/ruby/common/NetDest.hh | 1 - src/mem/ruby/common/Set.cc | 16 ---------------- src/mem/ruby/common/Set.hh | 1 - src/mem/ruby/network/MessageBuffer.cc | 7 ++++--- src/mem/ruby/network/simple/PerfectSwitch.cc | 4 +++- src/mem/ruby/slicc_interface/RubySlicc_Util.hh | 6 ------ src/mem/ruby/structures/RubyMemoryControl.cc | 5 +++-- 15 files changed, 48 insertions(+), 69 deletions(-) diff --git a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc index f4bb578e3..80523280b 100644 --- a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc +++ b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc @@ -27,6 +27,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ +#include "base/random.hh" #include "cpu/testers/directedtest/DirectedGenerator.hh" #include "cpu/testers/directedtest/RubyDirectedTester.hh" #include "cpu/testers/directedtest/SeriesRequestGenerator.hh" @@ -60,7 +61,7 @@ SeriesRequestGenerator::initiate() Request *req = new Request(m_address, 1, flags, masterId); Packet::Command cmd; - bool do_write = ((random() % 100) < m_percent_writes); + bool do_write = (random_mt.random(0, 100) < m_percent_writes); if (do_write) { cmd = MemCmd::WriteReq; } else { diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc index 7f3ff0d03..6dc2ccb73 100644 --- a/src/cpu/testers/memtest/memtest.cc +++ b/src/cpu/testers/memtest/memtest.cc @@ -37,6 +37,7 @@ #include #include "base/misc.hh" +#include "base/random.hh" #include "base/statistics.hh" #include "cpu/testers/memtest/memtest.hh" #include "debug/MemTest.hh" @@ -261,14 +262,14 @@ MemTest::tick() } //make new request - unsigned cmd = random() % 100; - unsigned offset = random() % size; - unsigned base = random() % 2; - uint64_t data = random(); - unsigned access_size = random() % 4; - bool uncacheable = (random() % 100) < percentUncacheable; + unsigned cmd = random_mt.random(0, 100); + unsigned offset = random_mt.random(0, size - 1); + unsigned base = random_mt.random(0, 1); + uint64_t data = random_mt.random(); + unsigned access_size = random_mt.random(0, 3); + bool uncacheable = random_mt.random(0, 100) < percentUncacheable; - unsigned dma_access_size = random() % 4; + unsigned dma_access_size = random_mt.random(0, 3); //If we aren't doing copies, use id as offset, and do a false sharing //mem tester @@ -296,7 +297,8 @@ MemTest::tick() return; } - bool do_functional = (random() % 100 < percentFunctional) && !uncacheable; + bool do_functional = (random_mt.random(0, 100) < percentFunctional) && + !uncacheable; Request *req = new Request(); uint8_t *result = new uint8_t[8]; diff --git a/src/cpu/testers/networktest/networktest.cc b/src/cpu/testers/networktest/networktest.cc index 8fff53aa7..c2d34489b 100644 --- a/src/cpu/testers/networktest/networktest.cc +++ b/src/cpu/testers/networktest/networktest.cc @@ -35,6 +35,7 @@ #include #include "base/misc.hh" +#include "base/random.hh" #include "base/statistics.hh" #include "cpu/testers/networktest/networktest.hh" #include "debug/NetworkTest.hh" @@ -143,7 +144,7 @@ NetworkTest::tick() // - send pkt if this number is < injRate*(10^precision) bool send_this_cycle; double injRange = pow((double) 10, (double) precision); - unsigned trySending = random() % (int) injRange; + unsigned trySending = random_mt.random(0, (int) injRange); if (trySending < injRate*injRange) send_this_cycle = true; else @@ -174,7 +175,7 @@ NetworkTest::generatePkt() { unsigned destination = id; if (trafficType == 0) { // Uniform Random - destination = random() % numMemories; + destination = random_mt.random(0, numMemories - 1); } else if (trafficType == 1) { // Tornado int networkDimension = (int) sqrt(numMemories); int my_x = id%networkDimension; @@ -232,7 +233,7 @@ NetworkTest::generatePkt() // MemCmd::Command requestType; - unsigned randomReqType = random() % 3; + unsigned randomReqType = random_mt.random(0, 2); if (randomReqType == 0) { // generate packet for virtual network 0 requestType = MemCmd::ReadReq; diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc index b2b679018..126deba6d 100644 --- a/src/cpu/testers/rubytest/Check.cc +++ b/src/cpu/testers/rubytest/Check.cc @@ -27,6 +27,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF 
THE POSSIBILITY OF SUCH DAMAGE. */ +#include "base/random.hh" #include "cpu/testers/rubytest/Check.hh" #include "debug/RubyTest.hh" #include "mem/ruby/common/SubBlock.hh" @@ -46,7 +47,8 @@ Check::Check(const Address& address, const Address& pc, pickInitiatingNode(); changeAddress(address); m_pc = pc; - m_access_mode = RubyAccessMode(random() % RubyAccessMode_NUM); + m_access_mode = RubyAccessMode(random_mt.random(0, + RubyAccessMode_NUM - 1)); m_store_count = 0; } @@ -57,11 +59,11 @@ Check::initiate() debugPrint(); // currently no protocols support prefetches - if (false && (random() & 0xf) == 0) { + if (false && (random_mt.random(0, 0xf) == 0)) { initiatePrefetch(); // Prefetch from random processor } - if (m_tester_ptr->getCheckFlush() && (random() & 0xff) == 0) { + if (m_tester_ptr->getCheckFlush() && (random_mt.random(0, 0xff) == 0)) { initiateFlush(); // issue a Flush request from random processor } @@ -81,7 +83,7 @@ Check::initiatePrefetch() { DPRINTF(RubyTest, "initiating prefetch\n"); - int index = random() % m_num_readers; + int index = random_mt.random(0, m_num_readers - 1); MasterPort* port = m_tester_ptr->getReadableCpuPort(index); Request::Flags flags; @@ -90,7 +92,7 @@ Check::initiatePrefetch() Packet::Command cmd; // 1 in 8 chance this will be an exclusive prefetch - if ((random() & 0x7) != 0) { + if (random_mt.random(0, 0x7) != 0) { cmd = MemCmd::ReadReq; // if necessary, make the request an instruction fetch @@ -132,7 +134,7 @@ Check::initiateFlush() DPRINTF(RubyTest, "initiating Flush\n"); - int index = random() % m_num_writers; + int index = random_mt.random(0, m_num_writers - 1); MasterPort* port = m_tester_ptr->getWritableCpuPort(index); Request::Flags flags; @@ -161,7 +163,7 @@ Check::initiateAction() DPRINTF(RubyTest, "initiating Action\n"); assert(m_status == TesterStatus_Idle); - int index = random() % m_num_writers; + int index = random_mt.random(0, m_num_writers - 1); MasterPort* port = m_tester_ptr->getWritableCpuPort(index); Request::Flags flags; @@ -222,7 +224,7 @@ Check::initiateCheck() DPRINTF(RubyTest, "Initiating Check\n"); assert(m_status == TesterStatus_Ready); - int index = random() % m_num_readers; + int index = random_mt.random(0, m_num_readers - 1); MasterPort* port = m_tester_ptr->getReadableCpuPort(index); Request::Flags flags; @@ -339,7 +341,7 @@ Check::pickValue() { assert(m_status == TesterStatus_Idle); m_status = TesterStatus_Idle; - m_value = random() & 0xff; // One byte + m_value = random_mt.random(0, 0xff); // One byte m_store_count = 0; } @@ -348,7 +350,7 @@ Check::pickInitiatingNode() { assert(m_status == TesterStatus_Idle || m_status == TesterStatus_Ready); m_status = TesterStatus_Idle; - m_initiatingNode = (random() % m_num_writers); + m_initiatingNode = (random_mt.random(0, m_num_writers - 1)); DPRINTF(RubyTest, "picked initiating node %d\n", m_initiatingNode); m_store_count = 0; } diff --git a/src/cpu/testers/rubytest/CheckTable.cc b/src/cpu/testers/rubytest/CheckTable.cc index f10132c89..df2bf864d 100644 --- a/src/cpu/testers/rubytest/CheckTable.cc +++ b/src/cpu/testers/rubytest/CheckTable.cc @@ -28,6 +28,7 @@ */ #include "base/intmath.hh" +#include "base/random.hh" #include "cpu/testers/rubytest/Check.hh" #include "cpu/testers/rubytest/CheckTable.hh" #include "debug/RubyTest.hh" @@ -107,7 +108,7 @@ Check* CheckTable::getRandomCheck() { assert(m_check_vector.size() > 0); - return m_check_vector[random() % m_check_vector.size()]; + return m_check_vector[random_mt.random(0, m_check_vector.size() - 1)]; } Check* diff --git 
a/src/cpu/testers/traffic_gen/generators.cc b/src/cpu/testers/traffic_gen/generators.cc index 7c6bab92c..135765fce 100644 --- a/src/cpu/testers/traffic_gen/generators.cc +++ b/src/cpu/testers/traffic_gen/generators.cc @@ -84,7 +84,7 @@ LinearGen::getNextPacket() { // choose if we generate a read or a write here bool isRead = readPercent != 0 && - (readPercent == 100 || random_mt.random(0, 100) < readPercent); + (readPercent == 100 || random_mt.random(0, 100) < readPercent); assert((readPercent == 0 && !isRead) || (readPercent == 100 && isRead) || readPercent != 100); @@ -124,7 +124,7 @@ LinearGen::nextPacketTick(bool elastic, Tick delay) const return MaxTick; } else { // return the time when the next request should take place - Tick wait = random_mt.random(minPeriod, maxPeriod); + Tick wait = random_mt.random(minPeriod, maxPeriod); // compensate for the delay experienced to not be elastic, by // default the value we generate is from the time we are @@ -152,13 +152,13 @@ RandomGen::getNextPacket() { // choose if we generate a read or a write here bool isRead = readPercent != 0 && - (readPercent == 100 || random_mt.random(0, 100) < readPercent); + (readPercent == 100 || random_mt.random(0, 100) < readPercent); assert((readPercent == 0 && !isRead) || (readPercent == 100 && isRead) || readPercent != 100); // address of the request - Addr addr = random_mt.random(startAddr, endAddr - 1); + Addr addr = random_mt.random(startAddr, endAddr - 1); // round down to start address of block addr -= addr % blocksize; @@ -184,15 +184,14 @@ DramGen::getNextPacket() // choose if we generate a read or a write here isRead = readPercent != 0 && - (readPercent == 100 || - random_mt.random(0, 100) < readPercent); + (readPercent == 100 || random_mt.random(0, 100) < readPercent); assert((readPercent == 0 && !isRead) || (readPercent == 100 && isRead) || readPercent != 100); // start by picking a random address in the range - addr = random_mt.random(startAddr, endAddr - 1); + addr = random_mt.random(startAddr, endAddr - 1); // round down to start address of a block, i.e. 
a DRAM burst addr -= addr % blocksize; @@ -275,7 +274,7 @@ RandomGen::nextPacketTick(bool elastic, Tick delay) const return MaxTick; } else { // return the time when the next request should take place - Tick wait = random_mt.random(minPeriod, maxPeriod); + Tick wait = random_mt.random(minPeriod, maxPeriod); // compensate for the delay experienced to not be elastic, by // default the value we generate is from the time we are diff --git a/src/cpu/testers/traffic_gen/traffic_gen.cc b/src/cpu/testers/traffic_gen/traffic_gen.cc index c1ce0d6d4..cbff712bc 100644 --- a/src/cpu/testers/traffic_gen/traffic_gen.cc +++ b/src/cpu/testers/traffic_gen/traffic_gen.cc @@ -423,7 +423,7 @@ TrafficGen::transition() states[currState]->exit(); // determine next state - double p = random_mt.gen_real1(); + double p = random_mt.random(); assert(currState < transitionMatrix.size()); double cumulative = 0.0; size_t i = 0; diff --git a/src/mem/ruby/common/NetDest.cc b/src/mem/ruby/common/NetDest.cc index b8c490ac5..0a89bda53 100644 --- a/src/mem/ruby/common/NetDest.cc +++ b/src/mem/ruby/common/NetDest.cc @@ -51,13 +51,6 @@ NetDest::addNetDest(const NetDest& netDest) } } -void -NetDest::addRandom() -{ - int i = random()%m_bits.size(); - m_bits[i].addRandom(); -} - void NetDest::setNetDest(MachineType machine, const Set& set) { diff --git a/src/mem/ruby/common/NetDest.hh b/src/mem/ruby/common/NetDest.hh index ba72fe214..9914ca218 100644 --- a/src/mem/ruby/common/NetDest.hh +++ b/src/mem/ruby/common/NetDest.hh @@ -55,7 +55,6 @@ class NetDest void add(MachineID newElement); void addNetDest(const NetDest& netDest); - void addRandom(); void setNetDest(MachineType machine, const Set& set); void remove(MachineID oldElement); void removeNetDest(const NetDest& netDest); diff --git a/src/mem/ruby/common/Set.cc b/src/mem/ruby/common/Set.cc index c674655ab..280fe71b4 100644 --- a/src/mem/ruby/common/Set.cc +++ b/src/mem/ruby/common/Set.cc @@ -102,22 +102,6 @@ Set::addSet(const Set& set) m_p_nArray[i] |= set.m_p_nArray[i]; } -/* - * This function should randomly assign 1 to the bits in the set--it - * should not clear the bits bits first, though? 
- */ -void -Set::addRandom() -{ - - for (int i = 0; i < m_nArrayLen; i++) { - // this ensures that all 32 bits are subject to random effects, - // as RAND_MAX typically = 0x7FFFFFFF - m_p_nArray[i] |= random() ^ (random() << 4); - } - clearExcess(); -} - /* * This function clears bits that are =1 in the parameter set */ diff --git a/src/mem/ruby/common/Set.hh b/src/mem/ruby/common/Set.hh index 724c5d9e9..bedd44aa6 100644 --- a/src/mem/ruby/common/Set.hh +++ b/src/mem/ruby/common/Set.hh @@ -87,7 +87,6 @@ class Set } void addSet(const Set& set); - void addRandom(); void remove(NodeID index) diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc index 1961765c5..1bc55c2c9 100644 --- a/src/mem/ruby/network/MessageBuffer.cc +++ b/src/mem/ruby/network/MessageBuffer.cc @@ -30,6 +30,7 @@ #include "base/cprintf.hh" #include "base/misc.hh" +#include "base/random.hh" #include "base/stl_helpers.hh" #include "debug/RubyQueue.hh" #include "mem/ruby/network/MessageBuffer.hh" @@ -133,9 +134,9 @@ Cycles random_time() { Cycles time(1); - time += Cycles(random() & 0x3); // [0...3] - if ((random() & 0x7) == 0) { // 1 in 8 chance - time += Cycles(100 + (random() % 0xf)); // 100 + [1...15] + time += Cycles(random_mt.random(0, 3)); // [0...3] + if (random_mt.random(0, 7) == 0) { // 1 in 8 chance + time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15] } return time; } diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc index fa0709496..caf07b3cf 100644 --- a/src/mem/ruby/network/simple/PerfectSwitch.cc +++ b/src/mem/ruby/network/simple/PerfectSwitch.cc @@ -29,6 +29,7 @@ #include #include "base/cast.hh" +#include "base/random.hh" #include "debug/RubyNetwork.hh" #include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/simple/PerfectSwitch.hh" @@ -169,7 +170,8 @@ PerfectSwitch::operateVnet(int vnet) out_queue_length += m_out[out][v]->getSize(); } int value = - (out_queue_length << 8) | (random() & 0xff); + (out_queue_length << 8) | + random_mt.random(0, 0xff); m_link_order[out].m_link = out; m_link_order[out].m_value = value; } diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh index 4398a4a00..5ec34f2dc 100644 --- a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh +++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh @@ -40,12 +40,6 @@ #include "mem/ruby/common/DataBlock.hh" #include "mem/packet.hh" -inline int -random(int n) -{ - return random() % n; -} - inline Cycles zero_time() { return Cycles(0); } inline NodeID diff --git a/src/mem/ruby/structures/RubyMemoryControl.cc b/src/mem/ruby/structures/RubyMemoryControl.cc index bc01c7f94..69fd45fe4 100644 --- a/src/mem/ruby/structures/RubyMemoryControl.cc +++ b/src/mem/ruby/structures/RubyMemoryControl.cc @@ -107,6 +107,7 @@ #include "base/cast.hh" #include "base/cprintf.hh" +#include "base/random.hh" #include "debug/RubyMemory.hh" #include "mem/ruby/common/Address.hh" #include "mem/ruby/common/Global.hh" @@ -437,7 +438,7 @@ RubyMemoryControl::queueReady(int bank) } if (m_mem_random_arbitrate >= 2) { - if ((random() % 100) < m_mem_random_arbitrate) { + if (random_mt.random(0, 100) < m_mem_random_arbitrate) { m_profiler_ptr->profileMemRandBusy(); return false; } @@ -614,7 +615,7 @@ RubyMemoryControl::executeCycle() // If randomness desired, re-randomize round-robin position each cycle if (m_mem_random_arbitrate) { - m_roundRobin = random() % m_total_banks; + m_roundRobin = random_mt.random(0, 
m_total_banks - 1); } // For each channel, scan round-robin, and pick an old, ready
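
For reference, the sketch below is a minimal, self-contained analogue of the inclusive-range semantics this patch relies on: random_mt.random(min, max) draws from [min, max] with both ends included, so the legacy random() % N idiom maps to random(0, N - 1). This is not gem5's actual base/random.hh; the DemoRandom class, the seed, and the percent_writes/size values are illustrative assumptions only.

    // Minimal stand-in for the global Mersenne-twister wrapper; gem5's real
    // random_mt lives in base/random.hh and has a richer interface.
    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <random>

    class DemoRandom
    {
      public:
        explicit DemoRandom(std::uint32_t seed = 0) : gen(seed) {}

        // Inclusive range [min, max], mirroring random_mt.random(min, max).
        int random(int min, int max)
        {
            std::uniform_int_distribution<int> dist(min, max);
            return dist(gen);
        }

      private:
        std::mt19937 gen;  // a single shared instance in gem5, hence "random_mt"
    };

    int main()
    {
        DemoRandom rng(42);  // illustrative seed

        // Percentage test: random() % 100 yields values in 0..99, and an
        // inclusive random(0, 99) reproduces exactly that range.
        const int percent_writes = 35;  // hypothetical parameter
        bool do_write = rng.random(0, 99) < percent_writes;

        // Index/offset test: replacing random() % size needs size - 1 as the
        // upper bound, otherwise the range silently gains one extra value.
        const int size = 16;  // hypothetical parameter
        int offset = rng.random(0, size - 1);
        assert(offset >= 0 && offset < size);

        std::cout << "do_write=" << do_write << " offset=" << offset << "\n";
        return 0;
    }

Because there is a single shared generator, the order in which components draw numbers determines which values each one sees, which is the instantiation-order caveat noted in the commit message above.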