Diffstat (limited to 'src')
-rw-r--r--  src/mem/gems_common/std-includes.hh | 4
-rw-r--r--  src/mem/gems_common/util.cc | 18
-rw-r--r--  src/mem/gems_common/util.hh | 1
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-L1cache.sm | 51
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-L2cache.sm | 7
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-mem.sm | 242
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-msg.sm | 32
-rw-r--r--  src/mem/protocol/MESI_CMP_directory.slicc | 2
-rw-r--r--  src/mem/protocol/MI_example-cache.sm | 51
-rw-r--r--  src/mem/protocol/MI_example-dir.sm | 458
-rw-r--r--  src/mem/protocol/MI_example-dma.sm | 135
-rw-r--r--  src/mem/protocol/MI_example-msg.sm | 32
-rw-r--r--  src/mem/protocol/MI_example.slicc | 3
-rw-r--r--  src/mem/protocol/RubySlicc_ComponentMapping.sm | 1
-rw-r--r--  src/mem/protocol/RubySlicc_Exports.sm | 26
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm | 58
-rw-r--r--  src/mem/protocol/standard_1level_CMP-protocol.sm (renamed from src/mem/ruby/init.hh) | 24
-rw-r--r--  src/mem/ruby/buffers/MessageBuffer.cc | 14
-rw-r--r--  src/mem/ruby/buffers/MessageBuffer.hh | 4
-rw-r--r--  src/mem/ruby/common/Address.hh | 35
-rw-r--r--  src/mem/ruby/common/Consumer.hh | 3
-rw-r--r--  src/mem/ruby/common/DataBlock.cc | 93
-rw-r--r--  src/mem/ruby/common/DataBlock.hh | 98
-rw-r--r--  src/mem/ruby/common/Debug.cc | 68
-rw-r--r--  src/mem/ruby/common/Debug.hh | 38
-rw-r--r--  src/mem/ruby/common/Driver.hh | 19
-rw-r--r--  src/mem/ruby/common/Global.hh | 25
-rw-r--r--  src/mem/ruby/common/Set.cc | 7
-rw-r--r--  src/mem/ruby/common/SubBlock.cc | 10
-rw-r--r--  src/mem/ruby/common/SubBlock.hh | 15
-rw-r--r--  src/mem/ruby/common/TypeDefines.hh | 23
-rw-r--r--  src/mem/ruby/config/MI_example-homogeneous.rb | 64
-rw-r--r--  src/mem/ruby/config/RubyConfig.cc | 272
-rw-r--r--  src/mem/ruby/config/RubyConfig.hh | 208
-rw-r--r--  src/mem/ruby/config/cfg.rb | 751
-rw-r--r--  src/mem/ruby/config/config.hh | 396
-rw-r--r--  src/mem/ruby/config/defaults.rb | 181
-rw-r--r--  src/mem/ruby/config/libruby_cfg_test.cc | 14
-rw-r--r--  src/mem/ruby/config/print_cfg.rb | 14
-rw-r--r--  src/mem/ruby/config/rubyconfig.defaults | 68
-rw-r--r--  src/mem/ruby/config/tester.defaults | 17
-rw-r--r--  src/mem/ruby/eventqueue/RubyEventQueue.cc | 6
-rw-r--r--  src/mem/ruby/filters/AbstractBloomFilter.hh | 71
-rw-r--r--  src/mem/ruby/filters/BlockBloomFilter.cc | 147
-rw-r--r--  src/mem/ruby/filters/BlockBloomFilter.hh | 82
-rw-r--r--  src/mem/ruby/filters/BulkBloomFilter.cc | 232
-rw-r--r--  src/mem/ruby/filters/BulkBloomFilter.hh (renamed from src/mem/ruby/tester/Tester.hh) | 76
-rw-r--r--  src/mem/ruby/filters/GenericBloomFilter.cc | 150
-rw-r--r--  src/mem/ruby/filters/GenericBloomFilter.hh (renamed from src/mem/ruby/system/StoreCache.hh) | 67
-rw-r--r--  src/mem/ruby/filters/H3BloomFilter.cc | 210
-rw-r--r--  src/mem/ruby/filters/H3BloomFilter.hh | 1258
-rw-r--r--  src/mem/ruby/filters/LSB_CountingBloomFilter.cc | 141
-rw-r--r--  src/mem/ruby/filters/LSB_CountingBloomFilter.hh | 82
-rw-r--r--  src/mem/ruby/filters/MultiBitSelBloomFilter.cc | 191
-rw-r--r--  src/mem/ruby/filters/MultiBitSelBloomFilter.hh (renamed from src/mem/ruby/system/PersistentArbiter.hh) | 92
-rw-r--r--  src/mem/ruby/filters/MultiGrainBloomFilter.cc | 172
-rw-r--r--  src/mem/ruby/filters/MultiGrainBloomFilter.hh | 88
-rw-r--r--  src/mem/ruby/filters/NonCountingBloomFilter.cc | 144
-rw-r--r--  src/mem/ruby/filters/NonCountingBloomFilter.hh (renamed from src/mem/ruby/tester/CheckTable.hh) | 79
-rw-r--r--  src/mem/ruby/init.cc | 191
-rw-r--r--  src/mem/ruby/libruby.cc | 206
-rw-r--r--  src/mem/ruby/libruby.hh | 109
-rw-r--r--  src/mem/ruby/libruby_internal.hh | 13
-rw-r--r--  src/mem/ruby/network/Network.cc | 34
-rw-r--r--  src/mem/ruby/network/Network.hh | 26
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh | 2
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc | 38
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh | 15
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc | 10
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc | 11
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh | 2
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc | 7
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh | 4
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc | 4
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc | 6
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc | 2
-rw-r--r--  src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc | 2
-rw-r--r--  src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc | 47
-rw-r--r--  src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh | 16
-rw-r--r--  src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh | 79
-rw-r--r--  src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc | 8
-rw-r--r--  src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc | 4
-rw-r--r--  src/mem/ruby/network/garnet-flexible-pipeline/Router.cc | 16
-rw-r--r--  src/mem/ruby/network/simple/CustomTopology.cc | 140
-rw-r--r--  src/mem/ruby/network/simple/CustomTopology.hh | 17
-rw-r--r--  src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc | 66
-rw-r--r--  src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh | 17
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc | 12
-rw-r--r--  src/mem/ruby/network/simple/PtToPtTopology.cc | 82
-rw-r--r--  src/mem/ruby/network/simple/PtToPtTopology.hh | 17
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.cc | 58
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.hh | 8
-rw-r--r--  src/mem/ruby/network/simple/Switch.cc | 5
-rw-r--r--  src/mem/ruby/network/simple/Switch.hh | 2
-rw-r--r--  src/mem/ruby/network/simple/Throttle.cc | 13
-rw-r--r--  src/mem/ruby/network/simple/Throttle.hh | 5
-rw-r--r--  src/mem/ruby/network/simple/Topology.cc | 485
-rw-r--r--  src/mem/ruby/network/simple/Topology.hh | 21
-rw-r--r--  src/mem/ruby/network/simple/Torus2DTopology.cc | 84
-rw-r--r--  src/mem/ruby/network/simple/Torus2DTopology.hh | 17
-rw-r--r--  src/mem/ruby/profiler/AddressProfiler.cc | 21
-rw-r--r--  src/mem/ruby/profiler/AddressProfiler.hh | 8
-rw-r--r--  src/mem/ruby/profiler/Profiler.cc | 382
-rw-r--r--  src/mem/ruby/profiler/Profiler.hh | 456
-rw-r--r--  src/mem/ruby/recorder/CacheRecorder.cc | 38
-rw-r--r--  src/mem/ruby/recorder/CacheRecorder.hh | 4
-rw-r--r--  src/mem/ruby/recorder/TraceRecord.cc | 41
-rw-r--r--  src/mem/ruby/recorder/TraceRecord.hh | 11
-rw-r--r--  src/mem/ruby/recorder/Tracer.cc | 41
-rw-r--r--  src/mem/ruby/recorder/Tracer.hh | 14
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.cc | 2
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.hh | 3
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractChip.hh | 126
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.hh | 33
-rw-r--r--  src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh | 285
-rw-r--r--  src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc | 25
-rw-r--r--  src/mem/ruby/slicc_interface/RubySlicc_Util.hh | 29
-rw-r--r--  src/mem/ruby/storebuffer/hfa.hh | 103
-rw-r--r--  src/mem/ruby/storebuffer/hfatypes.hh | 80
-rw-r--r--  src/mem/ruby/storebuffer/interface.cc | 67
-rw-r--r--  src/mem/ruby/storebuffer/interface.hh | 46
-rw-r--r--  src/mem/ruby/storebuffer/stb_interface.cc | 73
-rw-r--r--  src/mem/ruby/storebuffer/stb_interface.hh | 42
-rw-r--r--  src/mem/ruby/storebuffer/storebuffer.cc | 564
-rw-r--r--  src/mem/ruby/storebuffer/storebuffer.hh | 150
-rw-r--r--  src/mem/ruby/system/AbstractMemOrCache.hh | 1
-rw-r--r--  src/mem/ruby/system/CacheMemory.hh | 381
-rw-r--r--  src/mem/ruby/system/DMASequencer.cc | 130
-rw-r--r--  src/mem/ruby/system/DMASequencer.hh | 49
-rw-r--r--  src/mem/ruby/system/DirectoryMemory.cc | 165
-rw-r--r--  src/mem/ruby/system/DirectoryMemory.hh | 37
-rw-r--r--  src/mem/ruby/system/MemoryControl.cc | 156
-rw-r--r--  src/mem/ruby/system/MemoryControl.hh | 20
-rw-r--r--  src/mem/ruby/system/MemoryVector.hh | 81
-rw-r--r--  src/mem/ruby/system/NodePersistentTable.cc | 193
-rw-r--r--  src/mem/ruby/system/NodePersistentTable.hh | 99
-rw-r--r--  src/mem/ruby/system/PersistentArbiter.cc | 165
-rw-r--r--  src/mem/ruby/system/PersistentTable.cc | 194
-rw-r--r--  src/mem/ruby/system/PersistentTable.hh | 99
-rw-r--r--  src/mem/ruby/system/ProcessorInterface.hh | 45
-rw-r--r--  src/mem/ruby/system/RubyPort.cc | 5
-rw-r--r--  src/mem/ruby/system/RubyPort.hh | 60
-rw-r--r--  src/mem/ruby/system/Sequencer.cc | 1212
-rw-r--r--  src/mem/ruby/system/Sequencer.hh | 98
-rw-r--r--  src/mem/ruby/system/StoreBuffer.cc | 302
-rw-r--r--  src/mem/ruby/system/StoreBuffer.hh | 121
-rw-r--r--  src/mem/ruby/system/StoreCache.cc | 178
-rw-r--r--  src/mem/ruby/system/System.cc | 384
-rw-r--r--  src/mem/ruby/system/System.hh | 136
-rw-r--r--  src/mem/ruby/system/TBETable.hh | 21
-rw-r--r--  src/mem/ruby/tester/BarrierGenerator.cc | 333
-rw-r--r--  src/mem/ruby/tester/BarrierGenerator.hh | 138
-rw-r--r--  src/mem/ruby/tester/Check.cc | 310
-rw-r--r--  src/mem/ruby/tester/Check.hh | 107
-rw-r--r--  src/mem/ruby/tester/CheckTable.cc | 128
-rw-r--r--  src/mem/ruby/tester/DetermGETXGenerator.cc | 72
-rw-r--r--  src/mem/ruby/tester/DetermGETXGenerator.hh | 17
-rw-r--r--  src/mem/ruby/tester/DetermInvGenerator.cc | 100
-rw-r--r--  src/mem/ruby/tester/DetermInvGenerator.hh | 11
-rw-r--r--  src/mem/ruby/tester/DetermSeriesGETSGenerator.cc | 56
-rw-r--r--  src/mem/ruby/tester/DetermSeriesGETSGenerator.hh | 11
-rw-r--r--  src/mem/ruby/tester/DeterministicDriver.cc | 145
-rw-r--r--  src/mem/ruby/tester/DeterministicDriver.hh | 54
-rw-r--r--  src/mem/ruby/tester/Driver_Tester.cc (renamed from src/mem/ruby/slicc_interface/AbstractChip.cc) | 11
-rw-r--r--  src/mem/ruby/tester/Driver_Tester.hh (renamed from src/mem/ruby/tester/RequestGenerator.hh) | 76
-rw-r--r--  src/mem/ruby/tester/EventQueue_Tester.hh | 118
-rw-r--r--  src/mem/ruby/tester/Global_Tester.hh | 74
-rw-r--r--  src/mem/ruby/tester/Instruction.cc | 51
-rw-r--r--  src/mem/ruby/tester/Instruction.hh | 57
-rw-r--r--  src/mem/ruby/tester/RaceyDriver.cc | 67
-rw-r--r--  src/mem/ruby/tester/RaceyDriver.hh | 32
-rw-r--r--  src/mem/ruby/tester/RaceyPseudoThread.cc | 353
-rw-r--r--  src/mem/ruby/tester/RaceyPseudoThread.hh | 151
-rw-r--r--  src/mem/ruby/tester/RequestGenerator.cc | 220
-rw-r--r--  src/mem/ruby/tester/SpecifiedGenerator.cc | 4
-rw-r--r--  src/mem/ruby/tester/SpecifiedGenerator.hh | 8
-rw-r--r--  src/mem/ruby/tester/SyntheticDriver.cc | 286
-rw-r--r--  src/mem/ruby/tester/SyntheticDriver.hh | 118
-rw-r--r--  src/mem/ruby/tester/Tester.cc | 112
-rw-r--r--  src/mem/ruby/tester/main.cc | 9
-rw-r--r--  src/mem/ruby/tester/main.hh | 9
-rw-r--r--  src/mem/ruby/tester/test_framework.cc | 433
-rw-r--r--  src/mem/ruby/tester/test_framework.hh | 4
-rw-r--r--  src/mem/slicc/ast/ASTs.hh | 1
-rw-r--r--  src/mem/slicc/ast/EnqueueStatementAST.cc | 2
-rw-r--r--  src/mem/slicc/ast/MachineAST.cc | 6
-rw-r--r--  src/mem/slicc/ast/MachineAST.hh | 5
-rw-r--r--  src/mem/slicc/ast/MethodCallExprAST.cc | 26
-rw-r--r--  src/mem/slicc/ast/NewExprAST.cc | 9
-rw-r--r--  src/mem/slicc/ast/NewExprAST.hh | 20
-rw-r--r--  src/mem/slicc/ast/ObjDeclAST.cc | 29
-rw-r--r--  src/mem/slicc/parser/lexer.ll | 9
-rw-r--r--  src/mem/slicc/parser/parser.yy | 28
-rw-r--r--  src/mem/slicc/symbols/Func.cc | 5
-rw-r--r--  src/mem/slicc/symbols/Func.hh | 2
-rw-r--r--  src/mem/slicc/symbols/StateMachine.cc | 439
-rw-r--r--  src/mem/slicc/symbols/StateMachine.hh | 34
-rw-r--r--  src/mem/slicc/symbols/Symbol.hh | 4
-rw-r--r--  src/mem/slicc/symbols/SymbolTable.cc | 717
-rw-r--r--  src/mem/slicc/symbols/SymbolTable.hh | 2
-rw-r--r--  src/mem/slicc/symbols/Type.cc | 94
-rw-r--r--  src/mem/slicc/symbols/Type.hh | 3
-rw-r--r--  src/mem/slicc/symbols/Var.hh | 2
203 files changed, 12394 insertions(+), 8465 deletions(-)
diff --git a/src/mem/gems_common/std-includes.hh b/src/mem/gems_common/std-includes.hh
index 619214f1d..d6062337f 100644
--- a/src/mem/gems_common/std-includes.hh
+++ b/src/mem/gems_common/std-includes.hh
@@ -26,6 +26,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * $Id: std-includes.hh,v 3.7 2003/02/24 21:05:24 xu Exp $
+ */
+
#ifndef INCLUDES_H
#define INCLUDES_H
diff --git a/src/mem/gems_common/util.cc b/src/mem/gems_common/util.cc
index a64da15a6..403be383f 100644
--- a/src/mem/gems_common/util.cc
+++ b/src/mem/gems_common/util.cc
@@ -30,8 +30,7 @@
* $Id$
*/
-#include <cassert>
-
+#include "assert.hh"
#include "mem/gems_common/util.hh"
// Split a string into a head and tail strings on the specified
@@ -43,7 +42,7 @@ string string_split(string& str, char split_character)
string head = "";
string tail = "";
- unsigned counter = 0;
+ uint counter = 0;
while(counter < str.size()) {
if (str[counter] == split_character) {
counter++;
@@ -91,6 +90,19 @@ float string_to_float(string& str)
return ret;
}
+bool string_to_bool(const string & str)
+{
+ string lower(str);
+ for (size_t i=0;i<str.length();i++)
+ lower[i] = tolower(str[i]);
+ if (lower == "true")
+ return true;
+ else if (lower == "false")
+ return false;
+ else
+ assert(0);
+}
+
// Log functions
int log_int(long long n)
{
diff --git a/src/mem/gems_common/util.hh b/src/mem/gems_common/util.hh
index 7b32f24e8..7afe57a85 100644
--- a/src/mem/gems_common/util.hh
+++ b/src/mem/gems_common/util.hh
@@ -39,6 +39,7 @@ string string_split(string& str, char split_character);
string bool_to_string(bool value);
string int_to_string(int n, bool zero_fill = false, int width = 0);
float string_to_float(string& str);
+bool string_to_bool(const string & str);
int log_int(long long n);
bool is_power_of_2(long long n);
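
Not part of this patch: a minimal, self-contained C++ sketch of the behaviour the new string_to_bool() helper above is intended to have, namely a case-insensitive parse of "true"/"false" with any other input treated as a programming error. The stand-alone re-implementation and the main() driver below are illustrative assumptions, not code from the gem5 sources.

// Illustrative re-implementation of the string_to_bool helper added above.
#include <cassert>
#include <cctype>
#include <iostream>
#include <string>

static bool string_to_bool(const std::string &str)
{
    std::string lower(str);
    for (size_t i = 0; i < str.length(); i++)
        lower[i] = tolower(str[i]);          // case-insensitive compare
    if (lower == "true")
        return true;
    if (lower == "false")
        return false;
    assert(0 && "string_to_bool expects \"true\" or \"false\"");
    return false;                            // unreachable; keeps compilers quiet
}

int main()
{
    std::cout << string_to_bool("TRUE") << ' '     // prints 1
              << string_to_bool("false") << '\n';  // prints 0
    return 0;
}
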
diff --git a/src/mem/protocol/MESI_CMP_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
index 8f2096666..efdc58e1b 100644
--- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
@@ -33,7 +33,7 @@
*/
-machine(L1Cache, "MSI Directory L1 Cache CMP") {
+machine(L1Cache, "MSI Directory L1 Cache CMP") : LATENCY_L1_REQUEST_LATENCY LATENCY_L1_RESPONSE_LATENCY LATENCY_TO_L2_LATENCY {
// NODE L1 CACHE
// From this node's L1 cache TO the network
@@ -136,12 +136,21 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") {
TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
- CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
- CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+// CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
+// CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
- MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
+ CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["L1Icache"])';
+
+ CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["L1Dcache"])';
+
+
+// MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
+
+// Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+
+ MessageBuffer mandatoryQueue, ordered="false";
+ Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
int cache_state_to_int(State state);
@@ -290,40 +299,40 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") {
// ** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
+ trigger(Event:L1_Replacement, in_msg.LineAddress);
}
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
- if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+ if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
+ trigger(Event:L1_Replacement, in_msg.LineAddress);
}
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The tag matches for the L1, so the L1 asks the L2 for it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
- if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+ if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
}
}
}
@@ -517,7 +526,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") {
}
action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+ enqueue(unblockNetwork_out, ResponseMsg, latency="TO_L2_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
@@ -527,7 +536,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") {
}
action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
+ enqueue(unblockNetwork_out, ResponseMsg, latency="TO_L2_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
out_msg.Sender := machineID;
diff --git a/src/mem/protocol/MESI_CMP_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
index 43c37e832..2bd9b3ce7 100644
--- a/src/mem/protocol/MESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
@@ -156,9 +156,12 @@ machine(L2Cache, "MOSI Directory L2 Cache CMP") {
bool isPresent(Address);
}
- TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
+ TBETable L2_TBEs, template_hack="<L2Cache_TBE>", no_vector="true";
- CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
+// CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
+
+
+ CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])', no_vector="true";
// inclusive cache, returns L2 entries only
Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
diff --git a/src/mem/protocol/MESI_CMP_directory-mem.sm b/src/mem/protocol/MESI_CMP_directory-mem.sm
index 1fcd234fe..84768c333 100644
--- a/src/mem/protocol/MESI_CMP_directory-mem.sm
+++ b/src/mem/protocol/MESI_CMP_directory-mem.sm
@@ -31,23 +31,39 @@
* $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
+// This file is copied from Yasuko Watanabe's prefetch / memory protocol
+// Copied here by aep 12/14/07
-machine(Directory, "Token protocol") {
+
+machine(Directory, "MESI_CMP_filter_directory protocol") : LATENCY_MEMORY_LATENCY LATENCY_TO_MEM_CTRL_LATENCY {
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
+ MessageBuffer dmaRequestFromDir, network="To", virtual_network="4", ordered="true", no_vector="true";
+ MessageBuffer dmaRequestToDir, network="From", virtual_network="5", ordered="true", no_vector="true";
+
+
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, desc="Owner";
+ ID, desc="Intermediate state for DMA_READ when in I";
+ ID_W, desc="Intermediate state for DMA_WRITE when in I";
}
// Events
enumeration(Event, desc="Directory events") {
- Fetch, desc="A GETX arrives";
- Data, desc="A GETS arrives";
+ Fetch, desc="A memory fetch arrives";
+ Data, desc="writeback data arrives";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+//added by SS for dma
+ DMA_READ, desc="A DMA Read memory request";
+ DMA_WRITE, desc="A DMA Write memory request";
+
+
}
// TYPES
@@ -62,10 +78,21 @@ machine(Directory, "Token protocol") {
bool isPresent(Address);
}
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
// ** OBJECTS **
- DirectoryMemory directory, constructor_hack="i";
+// DirectoryMemory directory, constructor_hack="i";
+// MemoryControl memBuffer, constructor_hack="i";
+
+ DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
+
+ MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
+
State getState(Address addr) {
return State:I;
@@ -74,20 +101,44 @@ machine(Directory, "Token protocol") {
void setState(Address addr, State state) {
}
+ bool isGETRequest(CoherenceRequestType type) {
+ return (type == CoherenceRequestType:GETS) ||
+ (type == CoherenceRequestType:GET_INSTR) ||
+ (type == CoherenceRequestType:GETX);
+ }
+
+
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
+ out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaRequestFromDir);
// ** IN_PORTS **
+//added by SS for dma
+ in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
+ if (dmaRequestQueue_in.isReady()) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ if (in_msg.Type == DMARequestType:READ) {
+ trigger(Event:DMA_READ, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == DMARequestType:WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fetch, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (isGETRequest(in_msg.Type)) {
trigger(Event:Fetch, in_msg.Address);
} else {
+ DEBUG_EXPR(in_msg);
error("Invalid message");
}
}
@@ -108,27 +159,45 @@ machine(Directory, "Token protocol") {
}
}
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+
// Actions
action(a_sendAck, "a", desc="Send ack to L2") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="TO_MEM_CTRL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Sender);
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="TO_MEM_CTRL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
@@ -143,6 +212,42 @@ machine(Directory, "Token protocol") {
responseNetwork_in.dequeue();
}
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Sender;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
@@ -150,17 +255,122 @@ machine(Directory, "Token protocol") {
DEBUG_EXPR(in_msg.DataBlk);
}
}
+//added by SS for dma
+ action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := machineID;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := directory[address].DataBlk;
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
+ dmaRequestQueue_in.dequeue();
+ }
+
+ action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
+ out_msg.Destination.add(map_Address_to_DMA(address));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ directory[in_msg.PhysicalAddress].DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
+ }
+ }
+
+ action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.OriginalRequestorMachId := machineID;
+ //out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMAResponseType:ACK;
+ out_msg.Destination.add(map_Address_to_DMA(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(z_recycleRequestQueue, "z", desc="recycle request queue") {
+ requestNetwork_in.dequeue();
+ }
// TRANSITIONS
transition(I, Fetch) {
- d_sendData;
+ //d_sendData;
+ qf_queueMemoryFetchRequest;
j_popIncomingRequestQueue;
}
transition(I, Data) {
m_writeDataToMemory;
- a_sendAck;
+ //a_sendAck;
+ qw_queueMemoryWBRequest;
k_popIncomingResponseQueue;
}
+
+ transition(I, Memory_Data) {
+ d_sendData;
+ l_popMemQueue;
+ }
+
+ transition(I, Memory_Ack) {
+ a_sendAck;
+ l_popMemQueue;
+ }
+
+//added by SS for dma support
+ transition(I, DMA_READ, ID) {
+ qf_queueMemoryFetchRequestDMA;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(ID, Memory_Data, I) {
+ dr_sendDMAData;
+ l_popMemQueue;
+ }
+
+ transition(I, DMA_WRITE, ID_W) {
+ dw_writeDMAData;
+ qw_queueMemoryWBRequest_partial;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(ID_W, Memory_Ack, I) {
+ da_sendDMAAck;
+ l_popMemQueue;
+ }
+
+ transition({ID, ID_W}, {Fetch, Data} ) {
+ z_recycleRequestQueue;
+ }
+
+
}
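
Not part of this patch: a toy C++ model of the two-phase flow the new directory transitions above implement, in which an incoming fetch, writeback, or DMA request is first queued to the memory controller and the reply to the requestor (or the DMA controller) is only sent once the corresponding Memory_Data or Memory_Ack event comes back. All names below (ToyDirectory, handle, ...) are invented for this sketch.

// Toy model of the split request/response flow; illustrative only.
#include <iostream>

enum class State { I, ID, ID_W };   // base state plus the two DMA transient states
enum class Event { Fetch, Data, Memory_Data, Memory_Ack, DMA_READ, DMA_WRITE };

struct ToyDirectory {
    State state = State::I;

    void handle(Event e) {
        if (state == State::I && e == Event::Fetch) {
            std::cout << "queue MEMORY_READ to memBuffer\n";        // qf_queueMemoryFetchRequest
        } else if (state == State::I && e == Event::Data) {
            std::cout << "queue MEMORY_WB to memBuffer\n";          // qw_queueMemoryWBRequest
        } else if (state == State::I && e == Event::Memory_Data) {
            std::cout << "send MEMORY_DATA to requestor\n";         // d_sendData
        } else if (state == State::I && e == Event::Memory_Ack) {
            std::cout << "send MEMORY_ACK to requestor\n";          // a_sendAck
        } else if (state == State::I && e == Event::DMA_READ) {
            std::cout << "queue MEMORY_READ for DMA\n";             // qf_queueMemoryFetchRequestDMA
            state = State::ID;
        } else if (state == State::ID && e == Event::Memory_Data) {
            std::cout << "send data to DMA controller\n";           // dr_sendDMAData
            state = State::I;
        } else if (state == State::I && e == Event::DMA_WRITE) {
            std::cout << "write partial block, queue MEMORY_WB\n";  // dw_writeDMAData + qw_queueMemoryWBRequest_partial
            state = State::ID_W;
        } else if (state == State::ID_W && e == Event::Memory_Ack) {
            std::cout << "send ack to DMA controller\n";            // da_sendDMAAck
            state = State::I;
        } else {
            std::cout << "recycle request\n";                       // z_recycleRequestQueue
        }
    }
};

int main() {
    ToyDirectory dir;
    dir.handle(Event::DMA_READ);     // I -> ID, fetch queued to memory
    dir.handle(Event::Memory_Data);  // ID -> I, data forwarded to the DMA controller
    return 0;
}
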
diff --git a/src/mem/protocol/MESI_CMP_directory-msg.sm b/src/mem/protocol/MESI_CMP_directory-msg.sm
index c2d02b59d..e726b062c 100644
--- a/src/mem/protocol/MESI_CMP_directory-msg.sm
+++ b/src/mem/protocol/MESI_CMP_directory-msg.sm
@@ -79,6 +79,38 @@ structure(ResponseMsg, desc="...", interface="NetworkMessage") {
MessageSizeType MessageSize, desc="size category of the message";
}
+enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
+ READ, desc="Memory Read";
+ WRITE, desc="Memory Write";
+ NULL, desc="Invalid";
+}
+
+enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
+ DATA, desc="DATA read";
+ ACK, desc="ACK write";
+ NULL, desc="Invalid";
+}
+
+structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
+ DMARequestType Type, desc="Request type (read/write)";
+ Address PhysicalAddress, desc="Physical address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ int Offset, desc="The offset into the datablock";
+ int Len, desc="The length of the request";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+structure(DMAResponseMsg, desc="...", interface="NetworkMessage") {
+ DMAResponseType Type, desc="Response type (DATA/ACK)";
+ Address PhysicalAddress, desc="Physical address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+
+
/*
GenericRequestType convertToGenericType(CoherenceRequestType type) {
if(type == CoherenceRequestType:PUTX) {
diff --git a/src/mem/protocol/MESI_CMP_directory.slicc b/src/mem/protocol/MESI_CMP_directory.slicc
index 34303f97e..b687873fe 100644
--- a/src/mem/protocol/MESI_CMP_directory.slicc
+++ b/src/mem/protocol/MESI_CMP_directory.slicc
@@ -2,4 +2,6 @@ MESI_CMP_directory-msg.sm
MESI_CMP_directory-L2cache.sm
MESI_CMP_directory-L1cache.sm
MESI_CMP_directory-mem.sm
+MESI_CMP_directory-dma.sm
standard_CMP-protocol.sm
+
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 6c1cb02b6..ae8ab519f 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -1,5 +1,5 @@
-machine(L1Cache, "MI Example") {
+machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY_ISSUE_LATENCY {
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
@@ -15,7 +15,7 @@ machine(L1Cache, "MI Example") {
M, desc="Modified";
MI, desc="Modified, issued PUT";
- IS, desc="Issued request for IFETCH/GETX";
+ IS, desc="Issued request for LOAD/IFETCH";
IM, desc="Issued request for STORE/ATOMIC";
}
@@ -30,6 +30,8 @@ machine(L1Cache, "MI Example") {
Data, desc="Data from network";
Fwd_GETX, desc="Forward from network";
+ Inv, desc="Invalidate request from dir";
+
Replacement, desc="Replace a block";
Writeback_Ack, desc="Ack from the directory for a writeback";
Writeback_Nack, desc="Nack from the directory for a writeback";
@@ -37,21 +39,21 @@ machine(L1Cache, "MI Example") {
// STRUCTURE DEFINITIONS
- MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ MessageBuffer mandatoryQueue, ordered="false";
+ Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="data for the block";
+ DataBlock DataBlk, desc="Data in the block";
}
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
- void allocate(Address);
+ void allocate(Address, Entry);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
@@ -62,8 +64,6 @@ machine(L1Cache, "MI Example") {
structure(TBE, desc="...") {
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Trans, desc="Is this block part of a the current transaction?";
- bool Logged, desc="Has this block been logged in the current transaction?";
}
external_type(TBETable) {
@@ -76,7 +76,7 @@ machine(L1Cache, "MI Example") {
// STRUCTURES
- CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS, L1_CACHE_ASSOC, MachineType_L1Cache, int_to_string(i)+"_L1"', abstract_chip_ptr="true";
+ CacheMemory cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
TBETable TBEs, template_hack="<L1Cache_TBE>";
@@ -117,6 +117,11 @@ machine(L1Cache, "MI Example") {
if (cacheMemory.isTagPresent(addr)) {
cacheMemory[addr].CacheState := state;
+ if (state == State:M) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Write);
+ } else {
+ cacheMemory.changePermission(addr, AccessPermission:Invalid);
+ }
}
}
@@ -138,6 +143,9 @@ machine(L1Cache, "MI Example") {
else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Address);
}
+ else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.Address);
+ }
else {
error("Unexpected message");
}
@@ -164,13 +172,13 @@ machine(L1Cache, "MI Example") {
peek(mandatoryQueue_in, CacheMsg) {
- if (cacheMemory.isTagPresent(in_msg.Address) == false &&
- cacheMemory.cacheAvail(in_msg.Address) == false ) {
+ if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
+ cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
// make room for the block
- trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
}
else {
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
}
}
}
@@ -229,7 +237,7 @@ machine(L1Cache, "MI Example") {
action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
if (cacheMemory.isTagPresent(address) == false) {
- cacheMemory.allocate(address);
+ cacheMemory.allocate(address, new Entry);
}
}
@@ -244,11 +252,11 @@ machine(L1Cache, "MI Example") {
}
action(n_popResponseQueue, "n", desc="Pop the response queue") {
- responseNetwork_in.dequeue();
+ profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
}
action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
- forwardRequestNetwork_in.dequeue();
+ profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
@@ -292,10 +300,14 @@ machine(L1Cache, "MI Example") {
z_stall;
}
- transition({IS, IM}, Fwd_GETX) {
+ transition({IS, IM}, {Fwd_GETX, Inv}) {
z_stall;
}
+ transition(MI, Inv) {
+ o_popForwardedRequestQueue;
+ }
+
transition(M, Store) {
s_store_hit;
m_popMandatoryQueue;
@@ -306,6 +318,9 @@ machine(L1Cache, "MI Example") {
m_popMandatoryQueue;
}
+ transition(I, Inv) {
+ o_popForwardedRequestQueue;
+ }
transition(I, Store, IM) {
v_allocateTBE;
@@ -344,7 +359,7 @@ machine(L1Cache, "MI Example") {
h_deallocateL1CacheBlock;
}
- transition(M, Replacement, MI) {
+ transition(M, {Replacement,Inv}, MI) {
v_allocateTBE;
b_issuePUT;
x_copyDataFromCacheToTBE;
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index 311f8488b..f597ab73c 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -1,11 +1,12 @@
-machine(Directory, "Directory protocol") {
+machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_DIRECTORY_LATENCY LATENCY_MEMORY_LATENCY {
- MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="true";
+ MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
+ MessageBuffer dmaRequestFromDir, network="To", virtual_network="4", ordered="true";
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="true";
- MessageBuffer unblockToDir, network="From", virtual_network="3", ordered="true";
+ MessageBuffer dmaRequestToDir, network="From", virtual_network="5", ordered="true";
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
@@ -13,17 +14,32 @@ machine(Directory, "Directory protocol") {
I, desc="Invalid";
M, desc="Modified";
- MI, desc="Blocked on a writeback";
+ M_DRD, desc="Blocked on an invalidation for a DMA read";
+ M_DWR, desc="Blocked on an invalidation for a DMA write";
+
+ M_DWRI, desc="Intermediate state M_DWR-->I";
+
+ IM, desc="Intermediate state I-->M";
+ MI, desc="Intermediate state M-->I";
+ ID, desc="Intermediate state for DMA_READ when in I";
+ ID_W, desc="Intermediate state for DMA_WRITE when in I";
}
// Events
enumeration(Event, desc="Directory events") {
+ // processor requests
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
PUTX, desc="A PUTX arrives";
PUTX_NotOwner, desc="A PUTX arrives";
- PUTO, desc="A PUTO arrives";
- Unblock, desc="An unblock message arrives";
+
+ // DMA requests
+ DMA_READ, desc="A DMA Read memory request";
+ DMA_WRITE, desc="A DMA Write memory request";
+
+ // Memory Controller
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
}
// TYPES
@@ -39,26 +55,58 @@ machine(Directory, "Directory protocol") {
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
+ void invalidateBlock(Address);
+ }
+
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
+
+ // TBE entries for DMA requests
+ structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ State TBEState, desc="Transient State";
+ DataBlock DataBlk, desc="Data to be written (DMA write only)";
+ int Offset, desc="...";
+ int Len, desc="...";
}
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
// ** OBJECTS **
+ DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
+
+ MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
- DirectoryMemory directory, constructor_hack="i";
+ TBETable TBEs, template_hack="<Directory_TBE>";
State getState(Address addr) {
- return directory[addr].DirectoryState;
+ if (TBEs.isPresent(addr)) {
+ return TBEs[addr].TBEState;
+ } else if (directory.isPresent(addr)) {
+ return directory[addr].DirectoryState;
+ } else {
+ return State:I;
+ }
}
void setState(Address addr, State state) {
+
+ if (TBEs.isPresent(addr)) {
+ TBEs[addr].TBEState := state;
+ }
+
if (directory.isPresent(addr)) {
if (state == State:I) {
assert(directory[addr].Owner.count() == 0);
assert(directory[addr].Sharers.count() == 0);
- }
-
- if (state == State:M) {
+ } else if (state == State:M) {
assert(directory[addr].Owner.count() == 1);
assert(directory[addr].Sharers.count() == 0);
}
@@ -71,9 +119,25 @@ machine(Directory, "Directory protocol") {
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
+ out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaRequestFromDir);
+//added by SS
+ out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
+ in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
+ if (dmaRequestQueue_in.isReady()) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ if (in_msg.Type == DMARequestType:READ) {
+ trigger(Event:DMA_READ, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == DMARequestType:WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
@@ -88,8 +152,6 @@ machine(Directory, "Directory protocol") {
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address);
}
- } else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.Address);
} else {
error("Invalid message");
}
@@ -97,20 +159,23 @@ machine(Directory, "Directory protocol") {
}
}
- in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
- if (unblockNetwork_in.isReady()) {
- peek(unblockNetwork_in, ResponseMsg) {
- if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- trigger(Event:Unblock, in_msg.Address);
+//added by SS
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
} else {
+ DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
-
-
// Actions
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
@@ -125,6 +190,18 @@ machine(Directory, "Directory protocol") {
}
}
+ action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.OriginalRequestorMachId;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
@@ -141,31 +218,90 @@ machine(Directory, "Directory protocol") {
directory[address].Owner.clear();
}
+// action(d_sendData, "d", desc="Send data to requestor") {
+// peek(requestQueue_in, RequestMsg) {
+// enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+// out_msg.Address := address;
+//
+// if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
+// // out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_CLEAN;
+// out_msg.Type := CoherenceResponseType:DATA;
+// } else {
+// out_msg.Type := CoherenceResponseType:DATA;
+// }
+//
+// out_msg.Sender := machineID;
+// out_msg.Destination.add(in_msg.Requestor);
+// out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+// out_msg.MessageSize := MessageSizeType:Response_Data;
+// }
+// }
+// }
+
action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="TO_MEM_CTRL_LATENCY") {
out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
- if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
- // out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_CLEAN;
- out_msg.Type := CoherenceResponseType:DATA;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
+// action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
+// peek(dmaRequestQueue_in, DMARequestMsg) {
+// enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+// out_msg.PhysicalAddress := address;
+// out_msg.Type := DMAResponseType:DATA;
+// out_msg.DataBlk := directory[in_msg.PhysicalAddress].DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
+// out_msg.Destination.add(map_Address_to_DMA(address));
+// out_msg.MessageSize := MessageSizeType:Response_Data;
+// }
+// }
+// }
+
+ action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
+ out_msg.Destination.add(map_Address_to_DMA(address));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
+
+
+ action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
+ out_msg.Destination.add(map_Address_to_DMA(address));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
+ action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMAResponseType:ACK;
+ out_msg.Destination.add(map_Address_to_DMA(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(d_deallocateDirectory, "\d", desc="Deallocate Directory Entry") {
+ directory.invalidateBlock(address);
+ }
+
action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
peek(requestQueue_in, RequestMsg) {
directory[address].Owner.clear();
@@ -184,26 +320,32 @@ machine(Directory, "Directory protocol") {
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination := directory[in_msg.Address].Owner;
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
+ action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
requestQueue_in.dequeue();
}
- action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue();
+ action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
+ dmaRequestQueue_in.dequeue();
}
- action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
- // peek(unblockNetwork_in, ResponseMsg) {
+ action(l_writeDataToMemory, "l", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
// assert(in_msg.Dirty);
// assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
@@ -213,16 +355,218 @@ machine(Directory, "Directory protocol") {
}
}
+ action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ directory[in_msg.PhysicalAddress].DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
+ }
+ }
+
+ action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
+ directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ TBEs.allocate(address);
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ TBEs[address].Offset := in_msg.Offset;
+ TBEs[address].Len := in_msg.Len;
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ }
+
+ action(z_recycleRequestQueue, "z", desc="recycle request queue") {
+ requestQueue_in.recycle();
+ }
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ //out_msg.OriginalRequestorMachId := machineID;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := directory[address].DataBlk;
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+// action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+// peek(dmaRequestQueue_in, DMARequestMsg) {
+// enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+// out_msg.Address := address;
+// out_msg.Type := MemoryRequestType:MEMORY_WB;
+// out_msg.OriginalRequestorMachId := machineID;
+// out_msg.DataBlk := in_msg.DataBlk;
+// out_msg.MessageSize := in_msg.MessageSize;
+
+// DEBUG_EXPR(out_msg);
+// }
+// }
+// }
+
+
+ action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ //out_msg.OriginalRequestorMachId := machineID;
+ //out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ //out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+
+
+ action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := in_msg.Prefetch;
+
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
// TRANSITIONS
- transition(I, GETX, M) {
- d_sendData;
+ transition({M_DRD, M_DWR}, GETX) {
+ z_recycleRequestQueue;
+ }
+
+ transition({IM, MI, ID, ID_W}, {GETX, GETS, DMA_READ, DMA_WRITE, PUTX, PUTX_NotOwner} ) {
+ z_recycleRequestQueue;
+ }
+
+ transition(I, GETX, IM) {
+ //d_sendData;
+ qf_queueMemoryFetchRequest;
e_ownerIsRequestor;
i_popIncomingRequestQueue;
}
+ transition(IM, Memory_Data, M) {
+ d_sendData;
+ //e_ownerIsRequestor;
+ l_popMemQueue;
+ }
+
+
+ transition(I, DMA_READ, ID) {
+ //dr_sendDMAData;
+ qf_queueMemoryFetchRequestDMA;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(ID, Memory_Data, I) {
+ dr_sendDMAData;
+ //p_popIncomingDMARequestQueue;
+ l_popMemQueue;
+ }
+
+
+
+ transition(I, DMA_WRITE, ID_W) {
+ dw_writeDMAData;
+// da_sendDMAAck;
+ qw_queueMemoryWBRequest_partial;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(ID_W, Memory_Ack, I) {
+ da_sendDMAAck;
+ l_popMemQueue;
+ }
+
+ transition(M, DMA_READ, M_DRD) {
+ inv_sendCacheInvalidate;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(M_DRD, PUTX, I) {
+ drp_sendDMAData;
+ c_clearOwner;
+ a_sendWriteBackAck;
+ // d_deallocateDirectory;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, DMA_WRITE, M_DWR) {
+ v_allocateTBE;
+ inv_sendCacheInvalidate;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(M_DWR, PUTX, M_DWRI) {
+ dwt_writeDMADataFromTBE;
+ qw_queueMemoryWBRequest_partialTBE;
+ //a_sendWriteBackAck;
+ c_clearOwner;
+ //da_sendDMAAck;
+ w_deallocateTBE;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M_DWRI, Memory_Ack, I) {
+ //dwt_writeDMADataFromTBE;
+ l_sendWriteBackAck;
+ //c_clearOwner;
+ da_sendDMAAck;
+ //w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+
transition(M, GETX, M) {
@@ -231,14 +575,20 @@ machine(Directory, "Directory protocol") {
i_popIncomingRequestQueue;
}
- // transition(M, PUTX, MI) {
- transition(M, PUTX, I) {
+ transition(M, PUTX, MI) {
c_clearOwner;
- l_writeDataToMemory;
- a_sendWriteBackAck;
+// l_writeDataToMemory;
+ l_queueMemoryWBRequest;
+// a_sendWriteBackAck;
+ d_deallocateDirectory;
i_popIncomingRequestQueue;
}
+ transition(MI, Memory_Ack, I) {
+ l_sendWriteBackAck;
+ l_popMemQueue;
+ }
+
transition(M, PUTX_NotOwner, M) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
@@ -246,12 +596,8 @@ machine(Directory, "Directory protocol") {
transition(I, PUTX_NotOwner, I) {
b_sendWriteBackNack;
+ d_deallocateDirectory;
i_popIncomingRequestQueue;
}
-
- transition(MI, Unblock, M) {
- j_popIncomingUnblockQueue;
- }
-
}
diff --git a/src/mem/protocol/MI_example-dma.sm b/src/mem/protocol/MI_example-dma.sm
new file mode 100644
index 000000000..1f929cf9b
--- /dev/null
+++ b/src/mem/protocol/MI_example-dma.sm
@@ -0,0 +1,135 @@
+
+machine(DMA, "DMA Controller") {
+
+ MessageBuffer responseFromDir, network="From", virtual_network="4", ordered="true", no_vector="true";
+ MessageBuffer reqToDirectory, network="To", virtual_network="5", ordered="false", no_vector="true";
+
+ enumeration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, desc="Ready to accept a new request";
+ BUSY_RD, desc="Busy: currently processing a request";
+ BUSY_WR, desc="Busy: currently processing a request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ Ack, desc="DMA write to memory completed";
+ }
+
+ external_type(DMASequencer) {
+ void ackCallback();
+ void dataCallback(DataBlock);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
+ DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])', no_vector="true";
+ State cur_state, no_vector="true";
+
+ State getState(Address addr) {
+ return cur_state;
+ }
+ void setState(Address addr, State state) {
+ cur_state := state;
+ }
+
+ out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+
+ in_port(dmaRequestQueue_in, DMARequestMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady()) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ if (in_msg.Type == DMARequestType:READ ) {
+ trigger(Event:ReadRequest, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == DMARequestType:WRITE) {
+ trigger(Event:WriteRequest, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady()) {
+ peek( dmaResponseQueue_in, DMAResponseMsg) {
+ if (in_msg.Type == DMAResponseType:ACK) {
+ trigger(Event:Ack, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == DMAResponseType:DATA) {
+ trigger(Event:Data, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg) {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMARequestType:READ;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg) {
+ out_msg.PhysicalAddress := address;
+ out_msg.Type := DMARequestType:WRITE;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify the DMA sequencer that the write request completed") {
+ peek (dmaResponseQueue_in, DMAResponseMsg) {
+ dma_sequencer.ackCallback();
+ }
+ }
+
+ action(d_dataCallback, "d", desc="Write data to dma sequencer") {
+ peek (dmaResponseQueue_in, DMAResponseMsg) {
+ dma_sequencer.dataCallback(in_msg.DataBlk);
+ }
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue();
+ }
+
+ action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue();
+ }
+
+ action(z_stall, "z", desc="DMA is busy; stall") {
+ // do nothing
+ }
+
+ transition(READY, ReadRequest, BUSY_RD) {
+ s_sendReadRequest;
+ p_popRequestQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ s_sendWriteRequest;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Data, READY) {
+ d_dataCallback;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, Ack, READY) {
+ a_ackCallback;
+ p_popResponseQueue;
+ }
+}
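
The DMA controller added above is a simple four-state machine: a new request moves it from READY to BUSY_RD or BUSY_WR, the matching Data or Ack response returns it to READY, and anything arriving while busy stalls. A minimal stand-alone C++ sketch of that state machine follows; DmaModel, DmaState, and DmaEvent are illustrative names and not part of the Ruby sources.

    // Hypothetical model of the DMA controller's transitions; not gem5 code.
    #include <cassert>

    enum class DmaState { READY, BUSY_RD, BUSY_WR };
    enum class DmaEvent { ReadRequest, WriteRequest, Data, Ack };

    class DmaModel {
      DmaState state = DmaState::READY;
    public:
      // Returns false when the event must stall (controller busy or unexpected).
      bool handle(DmaEvent ev) {
        switch (state) {
        case DmaState::READY:
          if (ev == DmaEvent::ReadRequest)  { state = DmaState::BUSY_RD; return true; }
          if (ev == DmaEvent::WriteRequest) { state = DmaState::BUSY_WR; return true; }
          return false;
        case DmaState::BUSY_RD:
          if (ev == DmaEvent::Data) { state = DmaState::READY; return true; }
          return false;
        case DmaState::BUSY_WR:
          if (ev == DmaEvent::Ack)  { state = DmaState::READY; return true; }
          return false;
        }
        return false;
      }
    };

    int main() {
      DmaModel dma;
      assert(dma.handle(DmaEvent::ReadRequest));   // READY -> BUSY_RD (s_sendReadRequest)
      assert(!dma.handle(DmaEvent::WriteRequest)); // busy: request would stall (z_stall)
      assert(dma.handle(DmaEvent::Data));          // BUSY_RD -> READY (d_dataCallback)
      return 0;
    }
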
diff --git a/src/mem/protocol/MI_example-msg.sm b/src/mem/protocol/MI_example-msg.sm
index f577d60df..56c2e2e01 100644
--- a/src/mem/protocol/MI_example-msg.sm
+++ b/src/mem/protocol/MI_example-msg.sm
@@ -74,7 +74,6 @@ structure(RequestMsg, desc="...", interface="NetworkMessage") {
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
- int Acks, desc="How many acks to expect";
DataBlock DataBlk, desc="data for the cache line";
MessageSizeType MessageSize, desc="size category of the message";
}
@@ -87,6 +86,35 @@ structure(ResponseMsg, desc="...", interface="NetworkMessage") {
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
- int Acks, desc="How many acks to expect";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
+ READ, desc="Memory Read";
+ WRITE, desc="Memory Write";
+ NULL, desc="Invalid";
+}
+
+enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
+ DATA, desc="DATA read";
+ ACK, desc="ACK write";
+ NULL, desc="Invalid";
+}
+
+structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
+ DMARequestType Type, desc="Request type (read/write)";
+ Address PhysicalAddress, desc="Physical address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ int Offset, desc="The offset into the datablock";
+ int Len, desc="The length of the request";
+ MessageSizeType MessageSize, desc="size category of the message";
+}
+
+structure(DMAResponseMsg, desc="...", interface="NetworkMessage") {
+ DMAResponseType Type, desc="Response type (DATA/ACK)";
+ Address PhysicalAddress, desc="Physical address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
MessageSizeType MessageSize, desc="size category of the message";
}
diff --git a/src/mem/protocol/MI_example.slicc b/src/mem/protocol/MI_example.slicc
index cb1f80135..523668177 100644
--- a/src/mem/protocol/MI_example.slicc
+++ b/src/mem/protocol/MI_example.slicc
@@ -1,4 +1,5 @@
MI_example-msg.sm
MI_example-cache.sm
MI_example-dir.sm
-standard_1level_SMP-protocol.sm
+MI_example-dma.sm
+standard_1level_CMP-protocol.sm
diff --git a/src/mem/protocol/RubySlicc_ComponentMapping.sm b/src/mem/protocol/RubySlicc_ComponentMapping.sm
index 0c205ac22..022bb6862 100644
--- a/src/mem/protocol/RubySlicc_ComponentMapping.sm
+++ b/src/mem/protocol/RubySlicc_ComponentMapping.sm
@@ -30,6 +30,7 @@
// Mapping functions
// NodeID map_address_to_node(Address addr);
+MachineID map_Address_to_DMA(Address addr);
MachineID map_Address_to_Directory(Address addr);
NodeID map_Address_to_DirectoryNode(Address addr);
MachineID map_Address_to_CentralArbiterNode(Address addr);
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index e1c436938..a8b58b96c 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -131,6 +131,12 @@ enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
IO, desc="I/O";
REPLACEMENT, desc="Replacement";
COMMIT, desc="Commit version";
+ LD_XACT, desc="Transactional Load";
+ LDX_XACT, desc="Transactional Load-Intend-To-Modify";
+ ST_XACT, desc="Transactional Store";
+ BEGIN_XACT, desc="Begin Transaction";
+ COMMIT_XACT, desc="Commit Transaction";
+ ABORT_XACT, desc="Abort Transaction";
NULL, desc="Invalid request type";
}
@@ -156,6 +162,12 @@ enumeration(GenericRequestType, desc="...", default="GenericRequestType_NULL") {
WB_ACK, desc="WriteBack ack";
EXE_ACK, desc="Execlusive ack";
COMMIT, desc="Commit version";
+ LD_XACT, desc="Transactional Load";
+ LDX_XACT, desc="Transactional Load-Intend-Modify";
+ ST_XACT, desc="Transactional Store";
+ BEGIN_XACT, desc="Begin Transaction";
+ COMMIT_XACT, desc="Commit Transaction";
+ ABORT_XACT, desc="Abort Transaction";
NULL, desc="null request type";
}
@@ -211,27 +223,15 @@ enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
// CacheMsg
structure(CacheMsg, desc="...", interface="Message") {
- Address Address, desc="Line address for this request";
+ Address LineAddress, desc="Line address for this request";
Address PhysicalAddress, desc="Physical address for this request";
CacheRequestType Type, desc="Type of request (LD, ST, etc)";
Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
AccessModeType AccessMode, desc="user/supervisor access type";
int Size, desc="size in bytes of access";
PrefetchBit Prefetch, desc="Is this a prefetch request";
- // following field only used for MVC
- int Version, desc="Version associated with this request";
- // trans mem fields
- //bool Aborted, desc="This flag is set if the request is from an aborted xact.";
- Address LogicalAddress, desc="Virtual address for this request";
- //int TransactionLevel, desc="Transaction Level of this request";
- //uint64 SequenceNumber, desc="Sequence number of this request";
- int ThreadID, desc="The SMT thread that initiated this request";
- //uint64 RequestTime, desc="The cycle in which this request was issued";
}
-
-
-
// MaskPredictorType
enumeration(MaskPredictorType, "MaskPredictorType_Undefined", desc="...") {
Undefined, desc="Undefined";
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index 3f038031d..aa5648a9e 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -31,6 +31,7 @@
external_type(DataBlock, desc="..."){
void clear();
+ void copyPartial(DataBlock, int, int);
}
external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
@@ -48,8 +49,7 @@ external_type(InPort, primitive="yes") {
external_type(NodeID, default="0");
external_type(MachineID);
-external_type(StoreBuffer);
-
+MessageBuffer getMandatoryQueue(int core_id);
external_type(Set, non_obj="yes") {
void setSize(int);
@@ -96,53 +96,11 @@ external_type(NetDest, non_obj="yes") {
MachineID smallestElement(MachineType);
}
-external_type(PersistentTable) {
- void persistentRequestLock(Address, MachineID, AccessType);
- void persistentRequestUnlock(Address, MachineID);
- bool okToIssueStarving(Address);
- MachineID findSmallest(Address);
- AccessType typeOfSmallest(Address);
- void markEntries(Address);
- bool isLocked(Address);
- int countStarvingForAddress(Address);
- int countReadStarvingForAddress(Address);
-}
-
-external_type(NodePersistentTable) {
- void persistentRequestLock(Address, NodeID, AccessType);
- void persistentRequestUnlock(Address, NodeID);
- bool okToIssueStarving(Address);
- NodeID findSmallest(Address);
- AccessType typeOfSmallest(Address);
- void markEntries(Address);
- bool isLocked(Address);
- int countStarvingForAddress(Address);
- int countReadStarvingForAddress(Address);
-}
-
external_type(Sequencer) {
- void readCallback(Address, DataBlock, GenericMachineType, PrefetchBit, int);
- void writeCallback(Address, DataBlock, GenericMachineType, PrefetchBit, int);
- void readCallback(Address, DataBlock, GenericMachineType, PrefetchBit);
- void writeCallback(Address, DataBlock, GenericMachineType, PrefetchBit);
void readCallback(Address, DataBlock);
void writeCallback(Address, DataBlock);
- void readCallback(Address);
- void writeCallback(Address);
- void readCallbackAbort(Address, int);
- void writeCallbackAbort(Address, int);
- void readConflictCallback(Address);
- void writeConflictCallback(Address);
- void xactCallback(Address);
- void updateCurrentVersion();
- void updateLastCommittedVersion();
- void systemRecovery();
- void systemRestart();
void checkCoherence(Address);
void profileNack(Address, int, int, uint64);
- void resetRequestTime(Address, int);
- bool isReadAborted(Address, int);
- bool isWriteAborted(Address, int);
}
external_type(TimerTable, inport="yes") {
@@ -153,4 +111,16 @@ external_type(TimerTable, inport="yes") {
bool isSet(Address);
}
+external_type(GenericBloomFilter) {
+
+ void clear(int);
+ void increment(Address, int);
+ void decrement(Address, int);
+ void set(Address, int);
+ void unset(Address, int);
+
+ bool isSet(Address, int);
+ int getCount(Address, int);
+}
+
diff --git a/src/mem/ruby/init.hh b/src/mem/protocol/standard_1level_CMP-protocol.sm
index 35af0e603..34da6201f 100644
--- a/src/mem/ruby/init.hh
+++ b/src/mem/protocol/standard_1level_CMP-protocol.sm
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,23 +28,13 @@
*/
/*
- * init.hh
- *
- * Description:
- *
* $Id$
- *
*/
-#ifndef INIT_H
-#define INIT_H
-
-class Driver;
-
-extern void init_variables();
-//extern void init_variables(const char* config_str);
-extern void init_simulator();
-extern void init_simulator(Driver* _driver);
-extern void destroy_simulator();
+// global protocol features
+global(Protocol, desc="Global properties of this protocol",
+ interface = "AbstractProtocol") {
+ bool TwoLevelCache := false;
+ bool CMP := true;
+}
-#endif //INIT_H
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
index 9b6fd712e..c0cb90143 100644
--- a/src/mem/ruby/buffers/MessageBuffer.cc
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -33,6 +33,7 @@
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/system/System.hh"
MessageBuffer::MessageBuffer()
{
@@ -180,17 +181,18 @@ void MessageBuffer::enqueue(const MsgPtr& message, Time delta)
// the plus one is a kluge because of a SLICC issue
if (!m_ordering_set) {
- WARN_EXPR(*this);
+ // WARN_EXPR(*this);
WARN_EXPR(m_name);
ERROR_MSG("Ordering property of this queue has not been set");
}
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
+// printf ("delta %i \n", delta);
assert(delta>0);
Time current_time = g_eventQueue_ptr->getTime();
Time arrival_time = 0;
- if (!RANDOMIZATION || (m_randomization == false)) {
+ if (!RubySystem::getRandomization() || (m_randomization == false)) {
// No randomization
arrival_time = current_time + delta;
@@ -294,7 +296,7 @@ void MessageBuffer::pop()
{
DEBUG_MSG(QUEUE_COMP,MedPrio,"pop from " + m_name);
assert(isReady());
- m_prio_heap.extractMin();
+ Time ready_time = m_prio_heap.extractMin().m_time;
// record previous size and time so the current buffer size isn't adjusted until next cycle
if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
m_size_at_cycle_start = m_size;
@@ -321,13 +323,13 @@ void MessageBuffer::clear()
void MessageBuffer::recycle()
{
- // const int RECYCLE_LATENCY = 3;
+ // recycle latency is now per-buffer: m_recycle_latency, set via setRecycleLatency()
DEBUG_MSG(QUEUE_COMP,MedPrio,"recycling " + m_name);
assert(isReady());
MessageBufferNode node = m_prio_heap.extractMin();
- node.m_time = g_eventQueue_ptr->getTime() + RECYCLE_LATENCY;
+ node.m_time = g_eventQueue_ptr->getTime() + m_recycle_latency;
m_prio_heap.insert(node);
- g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, g_eventQueue_ptr->getTime() + RECYCLE_LATENCY);
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, g_eventQueue_ptr->getTime() + m_recycle_latency);
}
int MessageBuffer::setAndReturnDelayCycles(MsgPtr& message)
diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh
index b58203a93..3ca6790d0 100644
--- a/src/mem/ruby/buffers/MessageBuffer.hh
+++ b/src/mem/ruby/buffers/MessageBuffer.hh
@@ -60,6 +60,7 @@ public:
// Public Methods
static void printConfig(ostream& out) {}
+ void setRecycleLatency(int recycle_latency) { m_recycle_latency = recycle_latency; }
// TRUE if head of queue timestamp <= SystemTime
bool isReady() const {
@@ -105,6 +106,9 @@ public:
void clearStats() { m_not_avail_count = 0; m_msg_counter = 0; }
private:
+ //added by SS
+ int m_recycle_latency;
+
// Private Methods
int setAndReturnDelayCycles(MsgPtr& message);
diff --git a/src/mem/ruby/common/Address.hh b/src/mem/ruby/common/Address.hh
index d72fbf38a..b6899d1ac 100644
--- a/src/mem/ruby/common/Address.hh
+++ b/src/mem/ruby/common/Address.hh
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 1999 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -26,12 +27,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * $Id$
+ */
+
#ifndef ADDRESS_H
#define ADDRESS_H
#include <iomanip>
#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/system/System.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/MachineID.hh"
@@ -63,17 +68,16 @@ public:
physical_address_t maskHighOrderBits(int number) const;
physical_address_t shiftLowOrderBits(int number) const;
physical_address_t getLineAddress() const
- { return bitSelect(RubyConfig::dataBlockBits(), ADDRESS_WIDTH); }
+ { return bitSelect(RubySystem::getBlockSizeBits(), ADDRESS_WIDTH); }
physical_address_t getOffset() const
- { return bitSelect(0, RubyConfig::dataBlockBits()-1); }
+ { return bitSelect(0, RubySystem::getBlockSizeBits()-1); }
- void makeLineAddress() { m_address = maskLowOrderBits(RubyConfig::dataBlockBits()); }
+ void makeLineAddress() { m_address = maskLowOrderBits(RubySystem::getBlockSizeBits()); }
// returns the next stride address based on line address
void makeNextStrideAddress( int stride) {
- m_address = maskLowOrderBits(RubyConfig::dataBlockBits())
- + RubyConfig::dataBlockBytes()*stride;
+ m_address = maskLowOrderBits(RubySystem::getBlockSizeBits())
+ + RubySystem::getBlockSizeBytes()*stride;
}
- void makePageAddress() { m_address = maskLowOrderBits(RubyConfig::pageSizeBits()); }
int getBankSetNum() const;
int getBankSetDist() const;
@@ -103,6 +107,7 @@ private:
inline
Address line_address(const Address& addr) { Address temp(addr); temp.makeLineAddress(); return temp; }
+/*
inline
Address next_stride_address(const Address& addr, int stride) {
Address temp = addr;
@@ -110,9 +115,7 @@ Address next_stride_address(const Address& addr, int stride) {
temp.setAddress(temp.maskHighOrderBits(ADDRESS_WIDTH-RubyConfig::memorySizeBits())); // surpress wrap-around problem
return temp;
}
-
-inline
-Address page_address(const Address& addr) { Address temp(addr); temp.makePageAddress(); return temp; }
+*/
// Output operator declaration
ostream& operator<<(ostream& out, const Address& obj);
@@ -202,17 +205,19 @@ physical_address_t Address::shiftLowOrderBits(int number) const
inline
integer_t Address::memoryModuleIndex() const
{
- integer_t index = bitSelect(RubyConfig::dataBlockBits()+RubyConfig::memoryBits(), ADDRESS_WIDTH);
+ integer_t index = bitSelect(RubySystem::getBlockSizeBits()+RubySystem::getMemorySizeBits(), ADDRESS_WIDTH);
assert (index >= 0);
+ /*
if (index >= RubyConfig::memoryModuleBlocks()) {
- cerr << " memoryBits: " << RubyConfig::memoryBits() << " memorySizeBits: " << RubyConfig::memorySizeBits()
- << " Address: " << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubyConfig::dataBlockBits()) << dec << "]" << flush
+ cerr << " memoryBits: " << RubySystem::getMemorySizeBits() << " memorySizeBits: " << RubySystem::getMemorySizeBits()
+ << " Address: " << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubySystem::getBlockSizeBits()) << dec << "]" << flush
<< "error: limit exceeded. " <<
- " dataBlockBits: " << RubyConfig::dataBlockBits() <<
+ " getDataBlockBits: " << RubySystem::getBlockSizeBits() <<
" memoryModuleBlocks: " << RubyConfig::memoryModuleBlocks() <<
" index: " << index << endl;
}
assert (index < RubyConfig::memoryModuleBlocks());
+ */
return index;
// Index indexHighPortion = address.bitSelect(MEMORY_SIZE_BITS-1, PAGE_SIZE_BITS+NUMBER_OF_MEMORY_MODULE_BITS);
@@ -239,7 +244,7 @@ ADDRESS_WIDTH MEMORY_SIZE_BITS PAGE_SIZE_BITS DATA_BLOCK_BITS
inline
void Address::print(ostream& out) const
{
- out << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubyConfig::dataBlockBits()) << dec << "]" << flush;
+ out << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubySystem::getBlockSizeBits()) << dec << "]" << flush;
}
class Address;
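
The line/offset accessors above simply split an address at the block-size boundary. A small C++ sketch of the arithmetic, assuming 64-byte blocks (6 block-offset bits); the mask expressions stand in for bitSelect()/maskLowOrderBits() and are not the Ruby implementations.

    #include <cassert>
    #include <cstdint>

    int main() {
        const int kBlockSizeBits = 6;                // 64-byte blocks (assumption)
        const uint64_t addr = 0x12345;

        uint64_t offset = addr & ((1ULL << kBlockSizeBits) - 1);   // low 6 bits
        uint64_t line   = addr & ~((1ULL << kBlockSizeBits) - 1);  // clear low 6 bits

        assert(offset == 0x5);      // byte within the block
        assert(line   == 0x12340);  // block-aligned line address
        return 0;
    }
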
diff --git a/src/mem/ruby/common/Consumer.hh b/src/mem/ruby/common/Consumer.hh
index 34cd7864c..4a14ca20f 100644
--- a/src/mem/ruby/common/Consumer.hh
+++ b/src/mem/ruby/common/Consumer.hh
@@ -53,7 +53,8 @@ public:
virtual ~Consumer() { }
// Public Methods - pure virtual methods
- void triggerWakeup() { Time time = g_eventQueue_ptr->getTime(); if (m_last_wakeup != time) { wakeup(); m_last_wakeup = time; }}
+ // void triggerWakeup() { Time time = g_eventQueue_ptr->getTime(); if (m_last_wakeup != time) { wakeup(); m_last_wakeup = time; }}
+ void triggerWakeup(RubyEventQueue * eventQueue) { Time time = eventQueue->getTime(); if (m_last_wakeup != time) { wakeup(); m_last_wakeup = time; }}
virtual void wakeup() = 0;
virtual void print(ostream& out) const = 0;
const Time& getLastScheduledWakeup() const { return m_last_scheduled_wakeup; }
diff --git a/src/mem/ruby/common/DataBlock.cc b/src/mem/ruby/common/DataBlock.cc
index ce72bc7f4..5e6b8338e 100644
--- a/src/mem/ruby/common/DataBlock.cc
+++ b/src/mem/ruby/common/DataBlock.cc
@@ -1,91 +1,16 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- */
-
#include "mem/ruby/common/DataBlock.hh"
-DataBlock::DataBlock()
-{
- if (DATA_BLOCK || XACT_MEMORY) {
- m_data.setSize(RubyConfig::dataBlockBytes());
- }
- clear();
-}
-
-DataBlock::~DataBlock()
-{
-
-}
-
-void DataBlock::clear()
-{
- int size = m_data.size();
- for (int i = 0; i < size; i++) {
- m_data[i] = 0;
- }
-}
-
-bool DataBlock::equal(const DataBlock& obj) const
+DataBlock &
+DataBlock::operator=(const DataBlock & obj)
{
- bool value = true;
- int size = m_data.size();
- for (int i = 0; i < size; i++) {
- value = value && (m_data[i] == obj.m_data[i]);
- }
- return value;
-}
-
-void DataBlock::print(ostream& out) const
-{
- int size = m_data.size();
- for (int i = 0; i < size; i+=4) {
- out << hex << *((uint32*)(&(m_data[i]))) << " ";
- }
- out << dec << "]" << flush;
-}
-
-uint8 DataBlock::getByte(int whichByte) const
-{
- if (DATA_BLOCK || XACT_MEMORY) {
- return m_data[whichByte];
+ if (this == &obj) {
+ // assert(false);
} else {
- return 0;
- }
-}
-
-void DataBlock::setByte(int whichByte, uint8 data)
-{
- if (DATA_BLOCK || XACT_MEMORY) {
- m_data[whichByte] = data;
+ if (!m_alloc)
+ m_data = new uint8[RubySystem::getBlockSizeBytes()];
+ memcpy(m_data, obj.m_data, RubySystem::getBlockSizeBytes());
+ m_alloc = true;
}
+ return *this;
}
-
diff --git a/src/mem/ruby/common/DataBlock.hh b/src/mem/ruby/common/DataBlock.hh
index 8711cb740..2a0811f76 100644
--- a/src/mem/ruby/common/DataBlock.hh
+++ b/src/mem/ruby/common/DataBlock.hh
@@ -31,29 +31,41 @@
#define DATABLOCK_H
#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/system/System.hh"
#include "mem/gems_common/Vector.hh"
class DataBlock {
-public:
+ public:
// Constructors
- DataBlock();
+ DataBlock() {alloc();}
+ DataBlock(const DataBlock & cp) {
+ m_data = new uint8[RubySystem::getBlockSizeBytes()];
+ memcpy(m_data, cp.m_data, RubySystem::getBlockSizeBytes());
+ m_alloc = true;
+ }
// Destructor
- ~DataBlock();
+ ~DataBlock() { if(m_alloc) delete [] m_data;}
+
+ DataBlock& operator=(const DataBlock& obj);
// Public Methods
+ void assign(uint8* data);
+
void clear();
uint8 getByte(int whichByte) const;
+ const uint8* getData(int offset, int len) const;
void setByte(int whichByte, uint8 data);
+ void setData(uint8* data, int offset, int len);
+ void copyPartial(const DataBlock & dblk, int offset, int len);
bool equal(const DataBlock& obj) const;
void print(ostream& out) const;
private:
- // Private Methods
-
+ void alloc();
// Data Members (m_ prefix)
- Vector<uint8> m_data;
+ uint8* m_data;
+ bool m_alloc;
};
// Output operator declaration
@@ -61,6 +73,78 @@ ostream& operator<<(ostream& out, const DataBlock& obj);
bool operator==(const DataBlock& obj1, const DataBlock& obj2);
+// inline functions for speed
+
+inline
+void DataBlock::assign(uint8* data)
+{
+ delete [] m_data;
+ m_data = data;
+ m_alloc = false;
+}
+
+inline
+void DataBlock::alloc()
+{
+ m_data = new uint8[RubySystem::getBlockSizeBytes()];
+ m_alloc = true;
+ clear();
+}
+
+inline
+void DataBlock::clear()
+{
+ memset(m_data, 0, RubySystem::getBlockSizeBytes());
+}
+
+inline
+bool DataBlock::equal(const DataBlock& obj) const
+{
+ return !memcmp(m_data, obj.m_data, RubySystem::getBlockSizeBytes());
+}
+
+inline
+void DataBlock::print(ostream& out) const
+{
+ int size = RubySystem::getBlockSizeBytes();
+ out << "[ ";
+ for (int i = 0; i < size; i+=4) {
+ out << hex << *((uint32*)(&(m_data[i]))) << " ";
+ }
+ out << dec << "]" << flush;
+}
+
+inline
+uint8 DataBlock::getByte(int whichByte) const
+{
+ return m_data[whichByte];
+}
+
+inline
+const uint8* DataBlock::getData(int offset, int len) const
+{
+ assert(offset + len <= RubySystem::getBlockSizeBytes());
+ return &m_data[offset];
+}
+
+inline
+void DataBlock::setByte(int whichByte, uint8 data)
+{
+ m_data[whichByte] = data;
+}
+
+inline
+void DataBlock::setData(uint8* data, int offset, int len)
+{
+ assert(offset + len <= RubySystem::getBlockSizeBytes());
+ memcpy(&m_data[offset], data, len);
+}
+
+inline
+void DataBlock::copyPartial(const DataBlock & dblk, int offset, int len)
+{
+ setData(&dblk.m_data[offset], offset, len);
+}
// ******************* Definitions *******************
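
The new setData()/copyPartial() helpers are plain bounded byte copies into the block's backing array. A minimal sketch of the same semantics, assuming a fixed 64-byte block in place of RubySystem::getBlockSizeBytes(); the Block struct below is illustrative, not the real DataBlock.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Block {                          // illustrative stand-in for DataBlock
        static constexpr int kSize = 64;    // assumed block size
        uint8_t data[kSize] = {};

        void setData(const uint8_t* src, int offset, int len) {
            assert(offset + len <= kSize);
            memcpy(&data[offset], src, len);
        }
        // Copy bytes [offset, offset+len) of another block into the same range here.
        void copyPartial(const Block& other, int offset, int len) {
            setData(&other.data[offset], offset, len);
        }
    };

    int main() {
        Block a, b;
        uint8_t payload[4] = {1, 2, 3, 4};
        a.setData(payload, 8, 4);        // write 4 bytes at offset 8
        b.copyPartial(a, 8, 4);          // b now mirrors bytes 8..11 of a
        assert(b.data[8] == 1 && b.data[11] == 4);
        return 0;
    }
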
diff --git a/src/mem/ruby/common/Debug.cc b/src/mem/ruby/common/Debug.cc
index 02f4069ee..c1a6e16d0 100644
--- a/src/mem/ruby/common/Debug.cc
+++ b/src/mem/ruby/common/Debug.cc
@@ -38,36 +38,28 @@
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Debug.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include "mem/gems_common/util.hh"
class Debug;
extern Debug* g_debug_ptr;
std::ostream * debug_cout_ptr;
-struct DebugComponentData
+bool Debug::m_protocol_trace = false;
+
+// component character list
+const char DEFINE_COMP_CHAR[] =
{
- const char *desc;
- const char ch;
+#undef DEFINE_COMP
+#define DEFINE_COMP(component, character, description) character,
+#include "Debug.def"
};
-// component character list
-DebugComponentData debugComponents[] =
+// component description list
+const char* DEFINE_COMP_DESCRIPTION[] =
{
- {"System", 's' },
- {"Node", 'N' },
- {"Queue", 'q' },
- {"Event Queue", 'e' },
- {"Network", 'n' },
- {"Sequencer", 'S' },
- {"Tester", 't' },
- {"Generated", 'g' },
- {"SLICC", 'l' },
- {"Network Queues", 'Q' },
- {"Time", 'T' },
- {"Network Internals", 'i' },
- {"Store Buffer", 'b' },
- {"Cache", 'c' },
- {"Predictor", 'p' },
- {"Allocator", 'a' },
+#undef DEFINE_COMP
+#define DEFINE_COMP(component, character, description) description,
+#include "Debug.def"
};
extern "C" void changeDebugVerbosity(VerbosityLevel vb);
@@ -83,6 +75,32 @@ void changeDebugFilter(int filter)
g_debug_ptr->setFilter(filter);
}
+Debug::Debug()
+{
+ m_verbosityLevel = No_Verb;
+ m_starting_cycle = ~0;
+ clearFilter();
+ debug_cout_ptr = &cout;
+}
+
+Debug::Debug( const string & name, const vector<string> & argv )
+{
+ for (size_t i=0;i<argv.size();i+=2){
+ if (argv[i] == "filter_string")
+ setFilterString( argv[i+1].c_str() );
+ else if (argv[i] == "verbosity_string")
+ setVerbosityString( argv[i+1].c_str() );
+ else if (argv[i] == "start_time")
+ m_starting_cycle = atoi( argv[i+1].c_str() );
+ else if (argv[i] == "output_filename")
+ setDebugOutputFile( argv[i+1].c_str() );
+ else if (argv[i] == "protocol_trace")
+ m_protocol_trace = string_to_bool(argv[i+1]);
+ else
+ assert(0);
+ }
+}
+
Debug::Debug( const char *filterString, const char *verboseString,
Time filterStartTime, const char *filename )
{
@@ -208,7 +226,7 @@ bool Debug::checkFilter(char ch)
{
for (int i=0; i<NUMBER_OF_COMPS; i++) {
// Look at all components to find a character match
- if (debugComponents[i].ch == ch) {
+ if (DEFINE_COMP_CHAR[i] == ch) {
// We found a match - return no error
return false; // no error
}
@@ -274,9 +292,9 @@ bool Debug::addFilter(char ch)
{
for (int i=0; i<NUMBER_OF_COMPS; i++) {
// Look at all components to find a character match
- if (debugComponents[i].ch == ch) {
+ if (DEFINE_COMP_CHAR[i] == ch) {
// We found a match - update the filter bit mask
- cout << " Debug: Adding to filter: '" << ch << "' (" << debugComponents[i].desc << ")" << endl;
+ cout << " Debug: Adding to filter: '" << ch << "' (" << DEFINE_COMP_DESCRIPTION[i] << ")" << endl;
m_filter |= (1 << i);
return false; // no error
}
@@ -302,7 +320,7 @@ void Debug::usageInstructions(void)
{
cerr << "Debug components: " << endl;
for (int i=0; i<NUMBER_OF_COMPS; i++) {
- cerr << " " << debugComponents[i].ch << ": " << debugComponents[i].desc << endl;
+ cerr << " " << DEFINE_COMP_CHAR[i] << ": " << DEFINE_COMP_DESCRIPTION[i] << endl;
}
}
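
The DEFINE_COMP rewrite above is the X-macro idiom: one component list (Debug.def) is re-included with different DEFINE_COMP expansions so the enum, the character table, and the description table stay in lockstep. A self-contained C++ sketch of the idiom, with the list inlined here instead of kept in a separate .def file and with illustrative names only.

    #include <iostream>

    // Normally this list lives in a shared .def file; inlined for brevity.
    #define COMPONENT_LIST(X) \
        X(SYSTEM_COMP,  's', "System")  \
        X(QUEUE_COMP,   'q', "Queue")   \
        X(NETWORK_COMP, 'n', "Network")

    #define AS_ENUM(name, ch, desc) name,
    #define AS_CHAR(name, ch, desc) ch,
    #define AS_DESC(name, ch, desc) desc,

    enum DebugComponents { COMPONENT_LIST(AS_ENUM) NUMBER_OF_COMPS };
    const char  component_char[] = { COMPONENT_LIST(AS_CHAR) };
    const char* component_desc[] = { COMPONENT_LIST(AS_DESC) };

    int main() {
        for (int i = 0; i < NUMBER_OF_COMPS; ++i)
            std::cout << component_char[i] << ": " << component_desc[i] << "\n";
        return 0;
    }
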
diff --git a/src/mem/ruby/common/Debug.hh b/src/mem/ruby/common/Debug.hh
index ad88431ef..54e449908 100644
--- a/src/mem/ruby/common/Debug.hh
+++ b/src/mem/ruby/common/Debug.hh
@@ -31,37 +31,21 @@
* $Id$
*/
-#ifndef __MEM_RUBY_DEBUG_HH__
-#define __MEM_RUBY_DEBUG_HH__
+#ifndef DEBUG_H
+#define DEBUG_H
#include <unistd.h>
#include <iostream>
-#include "config/ruby_debug.hh"
-#include "mem/ruby/common/Global.hh"
-
extern std::ostream * debug_cout_ptr;
// component enumeration
enum DebugComponents
{
- SYSTEM_COMP,
- NODE_COMP,
- QUEUE_COMP,
- EVENTQUEUE_COMP,
- NETWORK_COMP,
- SEQUENCER_COMP,
- TESTER_COMP,
- GENERATED_COMP,
- SLICC_COMP,
- NETWORKQUEUE_COMP,
- TIME_COMP,
- NETWORK_INTERNALS_COMP,
- STOREBUFFER_COMP,
- CACHE_COMP,
- PREDICTOR_COMP,
- ALLOCATOR_COMP,
- NUMBER_OF_COMPS
+#undef DEFINE_COMP
+#define DEFINE_COMP(component, character, description) component,
+#include "Debug.def"
+ NUMBER_OF_COMPS
};
enum PriorityLevel {HighPrio, MedPrio, LowPrio};
@@ -70,6 +54,8 @@ enum VerbosityLevel {No_Verb, Low_Verb, Med_Verb, High_Verb};
class Debug {
public:
// Constructors
+ Debug();
+ Debug( const string & name, const vector<string> & argv );
Debug( const char *filterString, const char *verboseString,
Time filterStartTime, const char *filename );
@@ -77,6 +63,7 @@ public:
~Debug();
// Public Methods
+ static bool getProtocolTrace() { return m_protocol_trace; }
bool validDebug(int module, PriorityLevel priority);
void printVerbosity(ostream& out) const;
void setVerbosity(VerbosityLevel vb);
@@ -108,6 +95,7 @@ private:
Debug& operator=(const Debug& obj);
// Data Members (m_ prefix)
+ static bool m_protocol_trace;
VerbosityLevel m_verbosityLevel;
int m_filter;
Time m_starting_cycle;
@@ -155,7 +143,7 @@ const bool ASSERT_FLAG = true;
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << endl << flush;\
- if(isatty(STDERR_FILENO)) {\
+ if(isatty(STDIN_FILENO)) {\
cerr << "At this point you might want to attach a debug to ";\
cerr << "the running and get to the" << endl;\
cerr << "crash site; otherwise press enter to continue" << endl;\
@@ -176,7 +164,7 @@ const bool ASSERT_FLAG = true;
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << endl << flush;\
- if(isatty(STDERR_FILENO)) {\
+ if(isatty(STDIN_FILENO)) {\
cerr << "press enter to continue" << endl;\
cerr << "PID: " << getpid();\
cerr << endl << flush; \
@@ -303,5 +291,5 @@ const bool ASSERT_FLAG = true;
}\
}
-#endif // __MEM_RUBY_DEBUG_HH__
+#endif //DEBUG_H
diff --git a/src/mem/ruby/common/Driver.hh b/src/mem/ruby/common/Driver.hh
index 38bdbbf91..db8279fa5 100644
--- a/src/mem/ruby/common/Driver.hh
+++ b/src/mem/ruby/common/Driver.hh
@@ -27,6 +27,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
#ifndef DRIVER_H
#define DRIVER_H
@@ -34,7 +41,6 @@
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/protocol/CacheRequestType.hh"
-#include "mem/packet.hh"
class RubySystem;
class SubBlock;
@@ -52,10 +58,15 @@ public:
// Public Methods
virtual void get_network_config() {}
- virtual void hitCallback(Packet* pkt) = 0;
+ virtual void dmaHitCallback() = 0;
+ virtual void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) = 0; // Called by sequencer
+ virtual void conflictCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) { assert(0); } // Called by sequencer
virtual integer_t getInstructionCount(int procID) const { return 1; }
virtual integer_t getCycleCount(int procID) const { return 1; }
-
+ virtual void addThreadDependency(int procID, int requestor_thread, int conflict_thread) const { assert(0);}
+ virtual int inTransaction(int procID, int thread) const {
+   cout << "Driver.hh inTransaction " << endl;
+   return false; } // called by Sequencer
virtual void printDebug(){} //called by Sequencer
virtual void printStats(ostream& out) const = 0;
@@ -63,6 +74,8 @@ public:
virtual void printConfig(ostream& out) const = 0;
+ //virtual void abortCallback(NodeID proc){}
+
virtual integer_t readPhysicalMemory(int procID, physical_address_t address,
int len ){ ASSERT(0); return 0; }
diff --git a/src/mem/ruby/common/Global.hh b/src/mem/ruby/common/Global.hh
index 205b2bcb2..591ffed1e 100644
--- a/src/mem/ruby/common/Global.hh
+++ b/src/mem/ruby/common/Global.hh
@@ -32,9 +32,10 @@
*
* */
-#ifndef __MEM_RUBY_GLOBAL_HH__
-#define __MEM_RUBY_GLOBAL_HH__
+#ifndef GLOBAL_H
+#define GLOBAL_H
+/*
#ifdef SINGLE_LEVEL_CACHE
const bool TWO_LEVEL_CACHE = false;
#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // currently all protocols require L1s == nodes
@@ -60,24 +61,11 @@ const bool TWO_LEVEL_CACHE = true;
#define DIRECTORY_MEMBER_VARIABLE m_Directory_directory_vec[m_version]
#define TBE_TABLE_MEMBER_VARIABLE m_L1Cache_TBEs_vec[m_version]
-typedef unsigned char uint8;
-typedef unsigned int uint32;
-typedef unsigned long long uint64;
-
-typedef signed char int8;
-typedef int int32;
-typedef long long int64;
-
-typedef long long integer_t;
-typedef unsigned long long uinteger_t;
+*/
-typedef int64 Time;
-typedef uint64 physical_address_t;
-typedef uint64 la_t;
-typedef uint64 pa_t;
-typedef integer_t simtime_t;
// external includes for all classes
+#include "mem/ruby/common/TypeDefines.hh"
#include "mem/gems_common/std-includes.hh"
#include "mem/ruby/common/Debug.hh"
@@ -85,6 +73,7 @@ typedef integer_t simtime_t;
typedef Time LogicalTime;
typedef int64 Index; // what the address bit ripper returns
typedef int word; // one word of a cache line
+typedef unsigned int uint;
typedef int SwitchID;
typedef int LinkID;
@@ -105,5 +94,5 @@ extern inline int max_tokens()
}
-#endif // __MEM_RUBY_GLOBAL_HH__
+#endif //GLOBAL_H
diff --git a/src/mem/ruby/common/Set.cc b/src/mem/ruby/common/Set.cc
index 4cb40a246..6f01c4043 100644
--- a/src/mem/ruby/common/Set.cc
+++ b/src/mem/ruby/common/Set.cc
@@ -40,6 +40,7 @@
// set sizes
#include "mem/ruby/common/Set.hh"
+#include "mem/ruby/system/System.hh"
#include "mem/ruby/config/RubyConfig.hh"
#if __amd64__ || __LP64__
@@ -51,7 +52,7 @@
Set::Set()
{
m_p_nArray = NULL;
- setSize(RubyConfig::numberOfProcessors());
+ setSize(RubySystem::getNumberOfSequencers());
}
// copy constructor
@@ -511,7 +512,7 @@ void Set::setSize(int size)
#endif // __32BITS__
// decide whether to use dynamic or static alloction
- if(m_nArrayLen<=NUMBER_WORDS_PER_SET) { // constant defined in RubyConfig.h
+ if(m_nArrayLen<=NUMBER_WORDS_PER_SET) { // constant defined in RubyConfig.hh
// its OK to use the static allocation, and it will
// probably be faster (as m_nArrayLen is already in the
// cache and they will probably share the same cache line)
@@ -560,7 +561,7 @@ void Set::print(ostream& out) const
return;
}
char buff[24];
- out << "[Set 0x ";
+ out << "[Set (" << m_nSize << ") 0x ";
for (int i=m_nArrayLen-1; i>=0; i--) {
#ifdef __32BITS__
sprintf(buff,"%08X ",m_p_nArray[i]);
diff --git a/src/mem/ruby/common/SubBlock.cc b/src/mem/ruby/common/SubBlock.cc
index 568d3106a..de40e3f7d 100644
--- a/src/mem/ruby/common/SubBlock.cc
+++ b/src/mem/ruby/common/SubBlock.cc
@@ -42,16 +42,6 @@ SubBlock::SubBlock(const Address& addr, int size)
}
}
-SubBlock::SubBlock(const Address& addr, const Address& logicalAddress, int size)
-{
- m_address = addr;
- m_logicalAddress = logicalAddress;
- setSize(size);
- for(int i=0; i<size; i++) {
- setByte(i, 0);
- }
-}
-
void SubBlock::internalMergeFrom(const DataBlock& data)
{
int size = getSize();
diff --git a/src/mem/ruby/common/SubBlock.hh b/src/mem/ruby/common/SubBlock.hh
index 2943bb886..3bc09e1d0 100644
--- a/src/mem/ruby/common/SubBlock.hh
+++ b/src/mem/ruby/common/SubBlock.hh
@@ -46,16 +46,13 @@ public:
// Constructors
SubBlock() { }
SubBlock(const Address& addr, int size);
- SubBlock(const Address& addr, const Address& logicalAddress, int size);
// Destructor
~SubBlock() { }
// Public Methods
const Address& getAddress() const { return m_address; }
- const Address& getLogicalAddress() const { return m_logicalAddress; }
void setAddress(const Address& addr) { m_address = addr; }
- void setLogicalAddress(const Address& addr) { m_logicalAddress = addr; }
int getSize() const { return m_data.size(); }
void setSize(int size) { m_data.setSize(size); }
@@ -68,24 +65,18 @@ public:
// Merging to and from DataBlocks - We only need to worry about
// updates when we are using DataBlocks
- void mergeTo(DataBlock& data) const { if (DATA_BLOCK) { internalMergeTo(data); } }
- void mergeFrom(const DataBlock& data) { if (DATA_BLOCK) { internalMergeFrom(data); } }
+ void mergeTo(DataBlock& data) const { internalMergeTo(data); }
+ void mergeFrom(const DataBlock& data) { internalMergeFrom(data); }
void print(ostream& out) const;
private:
- // Private Methods
- // SubBlock(const SubBlock& obj);
- // SubBlock& operator=(const SubBlock& obj);
- // bool bytePresent(const Address& addr) { return ((addr.getAddress() >= m_address.getAddress()) && (addr.getAddress() < (m_address.getAddress()+getSize()))); }
- // uint8 getByte(const Address& addr) { return m_data[addr.getAddress() - m_address.getAddress()]; }
void internalMergeTo(DataBlock& data) const;
void internalMergeFrom(const DataBlock& data);
// Data Members (m_ prefix)
Address m_address;
- Address m_logicalAddress;
- Vector<unsigned> m_data;
+ Vector<uint> m_data;
};
// Output operator declaration
diff --git a/src/mem/ruby/common/TypeDefines.hh b/src/mem/ruby/common/TypeDefines.hh
new file mode 100644
index 000000000..97b3cd8a4
--- /dev/null
+++ b/src/mem/ruby/common/TypeDefines.hh
@@ -0,0 +1,23 @@
+
+#ifndef TYPEDEFINES_H
+#define TYPEDEFINES_H
+
+
+typedef unsigned char uint8;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+
+typedef signed char int8;
+typedef int int32;
+typedef long long int64;
+
+typedef long long integer_t;
+typedef unsigned long long uinteger_t;
+
+typedef int64 Time;
+typedef uint64 physical_address_t;
+typedef uint64 la_t;
+typedef uint64 pa_t;
+typedef integer_t simtime_t;
+
+#endif
diff --git a/src/mem/ruby/config/MI_example-homogeneous.rb b/src/mem/ruby/config/MI_example-homogeneous.rb
new file mode 100644
index 000000000..8c2eef009
--- /dev/null
+++ b/src/mem/ruby/config/MI_example-homogeneous.rb
@@ -0,0 +1,64 @@
+#!/usr/bin/ruby
+#
+# Creates a homogeneous CMP system with a single unified cache per
+# core and a crossbar network. Uses the default parameters listed
+# below, which can be overridden if a wrapper script sets the hash
+# libruby_args.
+#
+
+require "cfg.rb"
+
+# default values
+
+num_cores = 16
+L1_CACHE_SIZE_KB = 32
+L1_CACHE_ASSOC = 8
+L1_CACHE_LATENCY = "auto"
+num_memories = 2
+memory_size_mb = 1024
+NUM_DMA = 1
+
+# check for overrides
+
+for i in 0..$*.size-1 do
+ if $*[i] == "-p"
+ num_cores = $*[i+1].to_i
+ i = i+1
+ elsif $*[i] == "-m"
+ num_memories = $*[i+1].to_i
+ i = i+1
+ elsif $*[i] == "-s"
+ memory_size_mb = $*[i+1].to_i
+ i = i + 1
+ end
+end
+
+net_ports = Array.new
+iface_ports = Array.new
+
+num_cores.times { |n|
+ cache = SetAssociativeCache.new("l1u_"+n.to_s, L1_CACHE_SIZE_KB, L1_CACHE_LATENCY, L1_CACHE_ASSOC, "PSEUDO_LRU")
+ sequencer = Sequencer.new("Sequencer_"+n.to_s, cache, cache)
+ iface_ports << sequencer
+ net_ports << MI_example_CacheController.new("L1CacheController_"+n.to_s,
+ "L1Cache",
+ [cache],
+ sequencer)
+}
+num_memories.times { |n|
+ directory = DirectoryMemory.new("DirectoryMemory_"+n.to_s, memory_size_mb/num_memories)
+ memory_control = MemoryControl.new("MemoryControl_"+n.to_s)
+ net_ports << MI_example_DirectoryController.new("DirectoryController_"+n.to_s,
+ "Directory",
+ directory, memory_control)
+}
+NUM_DMA.times { |n|
+ dma_sequencer = DMASequencer.new("DMASequencer_"+n.to_s)
+ iface_ports << dma_sequencer
+ net_ports << DMAController.new("DMAController_"+n.to_s, "DMA", dma_sequencer)
+}
+
+topology = CrossbarTopology.new("theTopology", net_ports)
+on_chip_net = Network.new("theNetwork", topology)
+
+RubySystem.init(iface_ports, on_chip_net)
diff --git a/src/mem/ruby/config/RubyConfig.cc b/src/mem/ruby/config/RubyConfig.cc
index fe58a74d3..987a3d81d 100644
--- a/src/mem/ruby/config/RubyConfig.cc
+++ b/src/mem/ruby/config/RubyConfig.cc
@@ -36,135 +36,191 @@
*
*/
-#include "config/ruby_debug.hh"
#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/protocol/protocol_name.hh"
+//#include "mem/protocol/protocol_name.hh"
#include "mem/gems_common/util.hh"
-#include "mem/protocol/Protocol.hh"
+
+#define CONFIG_DEF_FILE "mem/ruby/config/config.hh"
+
+#define ERROR_MSG(MESSAGE)\
+{\
+ cerr << "Fatal Error: in fn "\
+ << __PRETTY_FUNCTION__ << " in "\
+ << __FILE__ << ":"\
+ << __LINE__ << ": "\
+ << (MESSAGE) << endl << flush;\
+ abort();\
+}
+
+// declare all configuration variables
+#define PARAM_BOOL( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ bool RubyConfig::m_##NAME = DEFAULT_VALUE;
+#define PARAM_STRING( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ const char* RubyConfig::m_##NAME = DEFAULT_VALUE;
+#define PARAM_ULONG( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ uint64 RubyConfig::m_##NAME = DEFAULT_VALUE;
+#define PARAM( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ int RubyConfig::m_##NAME = DEFAULT_VALUE;
+#define PARAM_ARRAY( NAME, TYPE, DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ TYPE* RubyConfig::m_##NAME = NULL;
+#define PARAM_ARRAY2D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ TYPE** RubyConfig::m_##NAME = NULL;
+#define PARAM_ARRAY3D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, D3_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ TYPE*** RubyConfig::m_##NAME = NULL;
+#include CONFIG_DEF_FILE
+#undef PARAM_BOOL
+#undef PARAM_STRING
+#undef PARAM_ULONG
+#undef PARAM
+#undef PARAM_ARRAY
+#undef PARAM_ARRAY2D
+#undef PARAM_ARRAY3D
#define CHECK_POWER_OF_2(N) { if (!is_power_of_2(N)) { ERROR_MSG(#N " must be a power of 2."); }}
#define CHECK_ZERO(N) { if (N != 0) { ERROR_MSG(#N " must be zero at initialization."); }}
#define CHECK_NON_ZERO(N) { if (N == 0) { ERROR_MSG(#N " must be non-zero."); }}
+uint32 RubyConfig::m_data_block_mask;
+
+void RubyConfig::reset()
+{
+ #define PARAM_BOOL( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = DEFAULT_VALUE;
+#define PARAM_STRING( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = DEFAULT_VALUE;
+#define PARAM_ULONG( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = DEFAULT_VALUE;
+#define PARAM( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = DEFAULT_VALUE;
+#define PARAM_ARRAY( NAME, TYPE, DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = new TYPE[DEFAULT_ARRAY_SIZE]; \
+ for (int i=0; i<DEFAULT_ARRAY_SIZE; i++) \
+ m_##NAME[i] = DEFAULT_VALUE;
+#define PARAM_ARRAY2D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = new TYPE*[D1_DEFAULT_ARRAY_SIZE]; \
+ for (int i=0; i<D1_DEFAULT_ARRAY_SIZE; i++) { \
+ m_##NAME[i] = new TYPE[D2_DEFAULT_ARRAY_SIZE]; \
+ for (int j=0; j<D2_DEFAULT_ARRAY_SIZE; j++) \
+ m_##NAME[i][j] = DEFAULT_VALUE; \
+ }
+#define PARAM_ARRAY3D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, D3_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ m_##NAME = new TYPE**[D1_DEFAULT_ARRAY_SIZE]; \
+ for (int i=0; i<D1_DEFAULT_ARRAY_SIZE; i++) { \
+ m_##NAME[i] = new TYPE*[D2_DEFAULT_ARRAY_SIZE]; \
+ for (int j=0; j<D2_DEFAULT_ARRAY_SIZE; j++) { \
+ m_##NAME[i][j] = new TYPE[D3_DEFAULT_ARRAY_SIZE]; \
+ for (int k=0; k<D3_DEFAULT_ARRAY_SIZE; k++) \
+ m_##NAME[i][j][k] = DEFAULT_VALUE; \
+ } \
+ }
+#include CONFIG_DEF_FILE
+#undef PARAM_BOOL
+#undef PARAM_STRING
+#undef PARAM_ULONG
+#undef PARAM
+#undef PARAM_ARRAY
+#undef PARAM_ARRAY2D
+#undef PARAM_ARRAY3D
+}
void RubyConfig::init()
{
+ /*
// MemoryControl:
- CHECK_NON_ZERO(MEM_BUS_CYCLE_MULTIPLIER);
- CHECK_NON_ZERO(BANKS_PER_RANK);
- CHECK_NON_ZERO(RANKS_PER_DIMM);
- CHECK_NON_ZERO(DIMMS_PER_CHANNEL);
- CHECK_NON_ZERO(BANK_QUEUE_SIZE);
- CHECK_NON_ZERO(BANK_BUSY_TIME);
- CHECK_NON_ZERO(MEM_CTL_LATENCY);
- CHECK_NON_ZERO(REFRESH_PERIOD);
- CHECK_NON_ZERO(BASIC_BUS_BUSY_TIME);
-
- CHECK_POWER_OF_2(BANKS_PER_RANK);
- CHECK_POWER_OF_2(RANKS_PER_DIMM);
- CHECK_POWER_OF_2(DIMMS_PER_CHANNEL);
-
- CHECK_NON_ZERO(g_MEMORY_SIZE_BYTES);
- CHECK_NON_ZERO(g_DATA_BLOCK_BYTES);
- CHECK_NON_ZERO(g_PAGE_SIZE_BYTES);
- CHECK_NON_ZERO(g_NUM_PROCESSORS);
- CHECK_NON_ZERO(g_PROCS_PER_CHIP);
- if(g_NUM_SMT_THREADS == 0){ //defaults to single-threaded
- g_NUM_SMT_THREADS = 1;
- }
- if (g_NUM_L2_BANKS == 0) { // defaults to number of ruby nodes
- g_NUM_L2_BANKS = g_NUM_PROCESSORS;
+ CHECK_NON_ZERO(m_MEM_BUS_CYCLE_MULTIPLIER);
+ CHECK_NON_ZERO(m_BANKS_PER_RANK);
+ CHECK_NON_ZERO(m_RANKS_PER_DIMM);
+ CHECK_NON_ZERO(m_DIMMS_PER_CHANNEL);
+ CHECK_NON_ZERO(m_BANK_QUEUE_SIZE);
+ CHECK_NON_ZERO(m_BankBusyTime);
+ CHECK_NON_ZERO(m_MEM_CTL_LATENCY);
+ CHECK_NON_ZERO(m_REFRESH_PERIOD);
+ CHECK_NON_ZERO(m_BASIC_BUS_BUSY_TIME);
+
+ CHECK_POWER_OF_2(m_BANKS_PER_RANK);
+ CHECK_POWER_OF_2(m_RANKS_PER_DIMM);
+ CHECK_POWER_OF_2(m_DIMMS_PER_CHANNEL);
+
+ CHECK_NON_ZERO(m_MemorySizeBytes);
+ // CHECK_NON_ZERO(m_DATA_BLOCK_BYTES);
+ CHECK_NON_ZERO(m_NUM_PROCESSORS);
+ CHECK_NON_ZERO(m_ProcsPerChip);
+
+ if (m_NUM_L2_BANKS == 0) { // defaults to number of ruby nodes
+ m_NUM_L2_BANKS = m_NUM_PROCESSORS;
}
- if (g_NUM_MEMORIES == 0) { // defaults to number of ruby nodes
- g_NUM_MEMORIES = g_NUM_PROCESSORS;
+ if (m_NUM_MEMORIES == 0) { // defaults to number of ruby nodes
+ m_NUM_MEMORIES = m_NUM_PROCESSORS;
}
- CHECK_ZERO(g_MEMORY_SIZE_BITS);
- CHECK_ZERO(g_DATA_BLOCK_BITS);
- CHECK_ZERO(g_PAGE_SIZE_BITS);
- CHECK_ZERO(g_NUM_PROCESSORS_BITS);
- CHECK_ZERO(g_NUM_CHIP_BITS);
- CHECK_ZERO(g_NUM_L2_BANKS_BITS);
- CHECK_ZERO(g_NUM_MEMORIES_BITS);
- CHECK_ZERO(g_PROCS_PER_CHIP_BITS);
- CHECK_ZERO(g_NUM_L2_BANKS_PER_CHIP);
- CHECK_ZERO(g_NUM_L2_BANKS_PER_CHIP_BITS);
- CHECK_ZERO(g_NUM_MEMORIES_BITS);
- CHECK_ZERO(g_MEMORY_MODULE_BLOCKS);
- CHECK_ZERO(g_MEMORY_MODULE_BITS);
- CHECK_ZERO(g_NUM_MEMORIES_PER_CHIP);
-
- CHECK_POWER_OF_2(g_MEMORY_SIZE_BYTES);
- CHECK_POWER_OF_2(g_DATA_BLOCK_BYTES);
- CHECK_POWER_OF_2(g_NUM_PROCESSORS);
- CHECK_POWER_OF_2(g_NUM_L2_BANKS);
- CHECK_POWER_OF_2(g_NUM_MEMORIES);
- CHECK_POWER_OF_2(g_PROCS_PER_CHIP);
-
- ASSERT(g_NUM_PROCESSORS >= g_PROCS_PER_CHIP); // obviously can't have less processors than procs/chip
- g_NUM_CHIPS = g_NUM_PROCESSORS/g_PROCS_PER_CHIP;
- ASSERT(g_NUM_L2_BANKS >= g_NUM_CHIPS); // cannot have a single L2cache across multiple chips
-
- g_NUM_L2_BANKS_PER_CHIP = g_NUM_L2_BANKS/g_NUM_CHIPS;
-
- ASSERT(L2_CACHE_NUM_SETS_BITS > log_int(g_NUM_L2_BANKS_PER_CHIP)); // cannot have less than one set per bank
- L2_CACHE_NUM_SETS_BITS = L2_CACHE_NUM_SETS_BITS - log_int(g_NUM_L2_BANKS_PER_CHIP);
-
- if (g_NUM_CHIPS > g_NUM_MEMORIES) {
- g_NUM_MEMORIES_PER_CHIP = 1; // some chips have a memory, others don't
+ CHECK_ZERO(m_MEMORY_SIZE_BITS);
+ CHECK_ZERO(m_NUM_PROCESSORS_BITS);
+ CHECK_ZERO(m_NUM_CHIP_BITS);
+ CHECK_ZERO(m_NUM_L2_BANKS_BITS);
+ CHECK_ZERO(m_NUM_MEMORIES_BITS);
+ CHECK_ZERO(m_PROCS_PER_CHIP_BITS);
+ CHECK_ZERO(m_NUM_L2_BANKS_PER_CHIP);
+ CHECK_ZERO(m_NUM_L2_BANKS_PER_CHIP_BITS);
+ CHECK_ZERO(m_NUM_MEMORIES_BITS);
+ CHECK_ZERO(m_MEMORY_MODULE_BLOCKS);
+ CHECK_ZERO(m_MEMORY_MODULE_BITS);
+ CHECK_ZERO(m_NUM_MEMORIES_PER_CHIP);
+
+ CHECK_POWER_OF_2(m_MemorySizeBytes);
+ CHECK_POWER_OF_2(m_NUM_PROCESSORS);
+ CHECK_POWER_OF_2(m_NUM_L2_BANKS);
+ CHECK_POWER_OF_2(m_NUM_MEMORIES);
+ CHECK_POWER_OF_2(m_ProcsPerChip);
+
+ assert(m_NUM_PROCESSORS >= m_ProcsPerChip); // obviously can't have less processors than procs/chip
+ m_NUM_CHIPS = m_NUM_PROCESSORS/m_ProcsPerChip;
+ assert(m_NUM_L2_BANKS >= m_NUM_CHIPS); // cannot have a single L2cache across multiple chips
+
+ m_NUM_L2_BANKS_PER_CHIP = m_NUM_L2_BANKS/m_NUM_CHIPS;
+
+ if (m_NUM_CHIPS > m_NUM_MEMORIES) {
+ m_NUM_MEMORIES_PER_CHIP = 1; // some chips have a memory, others don't
} else {
- g_NUM_MEMORIES_PER_CHIP = g_NUM_MEMORIES/g_NUM_CHIPS;
+ m_NUM_MEMORIES_PER_CHIP = m_NUM_MEMORIES/m_NUM_CHIPS;
}
- g_NUM_CHIP_BITS = log_int(g_NUM_CHIPS);
- g_MEMORY_SIZE_BITS = log_int(g_MEMORY_SIZE_BYTES);
- g_DATA_BLOCK_BITS = log_int(g_DATA_BLOCK_BYTES);
- g_PAGE_SIZE_BITS = log_int(g_PAGE_SIZE_BYTES);
- g_NUM_PROCESSORS_BITS = log_int(g_NUM_PROCESSORS);
- g_NUM_L2_BANKS_BITS = log_int(g_NUM_L2_BANKS);
- g_NUM_L2_BANKS_PER_CHIP_BITS = log_int(g_NUM_L2_BANKS_PER_CHIP);
- g_NUM_MEMORIES_BITS = log_int(g_NUM_MEMORIES);
- g_PROCS_PER_CHIP_BITS = log_int(g_PROCS_PER_CHIP);
-
- g_MEMORY_MODULE_BITS = g_MEMORY_SIZE_BITS - g_DATA_BLOCK_BITS - g_NUM_MEMORIES_BITS;
- g_MEMORY_MODULE_BLOCKS = (int64(1) << g_MEMORY_MODULE_BITS);
-
- if ((!Protocol::m_CMP) && (g_PROCS_PER_CHIP > 1)) {
- ERROR_MSG("Non-CMP protocol should set g_PROCS_PER_CHIP to 1");
- }
+ m_NUM_CHIP_BITS = log_int(m_NUM_CHIPS);
+ m_MEMORY_SIZE_BITS = log_int(m_MemorySizeBytes);
- // Randomize the execution
- srandom(g_RANDOM_SEED);
-}
+ m_data_block_mask = ~ (~0 << m_DATA_BLOCK_BITS);
-int RubyConfig::L1CacheNumToL2Base(NodeID L1CacheNum)
-{
- return L1CacheNum/g_PROCS_PER_CHIP;
+ m_NUM_PROCESSORS_BITS = log_int(m_NUM_PROCESSORS);
+ m_NUM_L2_BANKS_BITS = log_int(m_NUM_L2_BANKS);
+ m_NUM_L2_BANKS_PER_CHIP_BITS = log_int(m_NUM_L2_BANKS_PER_CHIP);
+ m_NUM_MEMORIES_BITS = log_int(m_NUM_MEMORIES);
+ m_PROCS_PER_CHIP_BITS = log_int(m_ProcsPerChip);
+
+ m_MEMORY_MODULE_BITS = m_MEMORY_SIZE_BITS - m_DATA_BLOCK_BITS - m_NUM_MEMORIES_BITS;
+ m_MEMORY_MODULE_BLOCKS = (int64(1) << m_MEMORY_MODULE_BITS);
+
+ */
+
+ // Randomize the execution
+ // srandom(m_RandomSeed);
}
static void print_parameters(ostream& out)
{
-#define PARAM(NAME) { out << #NAME << ": " << NAME << endl; }
-#define PARAM_UINT(NAME) { out << #NAME << ": " << NAME << endl; }
-#define PARAM_ULONG(NAME) { out << #NAME << ": " << NAME << endl; }
-#define PARAM_BOOL(NAME) { out << #NAME << ": " << bool_to_string(NAME) << endl; }
-#define PARAM_DOUBLE(NAME) { out << #NAME << ": " << NAME << endl; }
-#define PARAM_STRING(NAME) { assert(NAME != NULL); out << #NAME << ": " << string(NAME) << endl; }
-#define PARAM_ARRAY(PTYPE, NAME, ARRAY_SIZE) \
- { \
- out << #NAME << ": ("; \
- for (int i = 0; i < ARRAY_SIZE; i++) { \
- if (i != 0) { \
- out << ", "; \
- } \
- out << NAME[i]; \
- } \
- out << ")" << endl; \
- } \
-
-
-#include "mem/ruby/config/config.hh"
+#define print_true(NAME)
+#define print_false(NAME) \
+ out << #NAME << ": " << RubyConfig::get##NAME () << endl
+
+#define PARAM(NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR) { print_##CUSTOM_ACCESSOR(NAME); }
+#define PARAM_UINT(NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR) { print_##CUSTOM_ACCESSOR(NAME); }
+#define PARAM_ULONG(NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR) { print_##CUSTOM_ACCESSOR(NAME); }
+#define PARAM_BOOL(NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR) { print_##CUSTOM_ACCESSOR(NAME); }
+#define PARAM_DOUBLE(NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR) { print_##CUSTOM_ACCESSOR(NAME); }
+#define PARAM_STRING(NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR) { print_##CUSTOM_ACCESSOR(NAME); }
+#define PARAM_ARRAY( NAME, TYPE, DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) { out << #NAME << ": ARRAY" << endl; }
+#define PARAM_ARRAY2D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) { out << #NAME << ": ARRAY2D" << endl; }
+#define PARAM_ARRAY3D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, D3_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) { out << #NAME << ": ARRAY3D" << endl; }
+#include CONFIG_VAR_FILENAME
#undef PARAM
#undef PARAM_UINT
#undef PARAM_ULONG
@@ -172,15 +228,17 @@ static void print_parameters(ostream& out)
#undef PARAM_DOUBLE
#undef PARAM_STRING
#undef PARAM_ARRAY
+#undef PARAM_ARRAY2D
+#undef PARAM_ARRAY3D
}
void RubyConfig::printConfiguration(ostream& out) {
out << "Ruby Configuration" << endl;
out << "------------------" << endl;
- out << "protocol: " << CURRENT_PROTOCOL << endl;
+ //out << "protocol: " << CURRENT_PROTOCOL << endl;
out << "compiled_at: " << __TIME__ << ", " << __DATE__ << endl;
- out << "RUBY_DEBUG: " << bool_to_string(RUBY_DEBUG) << endl;
+ // out << "RUBY_DEBUG: " << bool_to_string(RUBY_DEBUG) << endl;
char buffer[100];
gethostname(buffer, 50);
diff --git a/src/mem/ruby/config/RubyConfig.hh b/src/mem/ruby/config/RubyConfig.hh
index 6de6bd1aa..f2e3a0f13 100644
--- a/src/mem/ruby/config/RubyConfig.hh
+++ b/src/mem/ruby/config/RubyConfig.hh
@@ -40,12 +40,13 @@
#ifndef RUBYCONFIG_H
#define RUBYCONFIG_H
-#include "mem/ruby/common/Global.hh"
-#include "mem/gems_common/ioutil/vardecl.hh"
-#include "mem/ruby/system/NodeID.hh"
+#include <cstdlib>
+#include <string>
+#include <ostream>
+#include <assert.h>
-#define MEMORY_LATENCY RubyConfig::memoryResponseLatency()
-#define ABORT_DELAY m_chip_ptr->getTransactionManager(m_version)->getAbortDelay()
+#include "mem/ruby/common/TypeDefines.hh"
+#define CONFIG_VAR_FILENAME "mem/ruby/config/config.hh"
// Set paramterization
/*
@@ -61,96 +62,169 @@
*/
const int NUMBER_WORDS_PER_SET = 4;
+using namespace std;
+
class RubyConfig {
public:
// CACHE BLOCK CONFIG VARIBLES
- static int dataBlockBits() { return g_DATA_BLOCK_BITS; }
- static int dataBlockBytes() { return g_DATA_BLOCK_BYTES; }
+ static uint32 dataBlockMask() { return m_data_block_mask; }
+
+ static int numberOfDMA() { return 1; }
+ static int numberOfDMAPerChip() { return 1; }
+ static int DMATransitionsPerCycle() { return 1; }
// SUPPORTED PHYSICAL MEMORY CONFIG VARIABLES
- static int pageSizeBits() { return g_PAGE_SIZE_BITS; }
- static int pageSizeBytes() { return g_PAGE_SIZE_BYTES; }
- static int memorySizeBits() { return g_MEMORY_SIZE_BITS; }
- static int64 memorySizeBytes() { return g_MEMORY_SIZE_BYTES; }
- static int memoryModuleBits() { return g_MEMORY_MODULE_BITS; }
- static int64 memoryModuleBlocks() { return g_MEMORY_MODULE_BLOCKS; }
-
- // returns number of SMT threads per physical processor
- static int numberofSMTThreads() { return g_NUM_SMT_THREADS; }
+ // static int memoryModuleBits() { return m_MEMORY_MODULE_BITS; }
+ // static int64 memoryModuleBlocks() { return m_MEMORY_MODULE_BLOCKS; }
+
// defines the number of simics processors (power of 2)
- static int numberOfProcessors() { return g_NUM_PROCESSORS; }
- static int procsPerChipBits() { return g_PROCS_PER_CHIP_BITS; }
- static int numberOfProcsPerChip() { return g_PROCS_PER_CHIP; }
- static int numberOfChips() { return g_NUM_CHIPS; }
+ // static int numberOfProcessors() { return m_NUM_PROCESSORS; }
+ // static int procsPerChipBits() { return m_PROCS_PER_CHIP_BITS; }
+ // static int numberOfProcsPerChip() { return m_ProcsPerChip; }
+ // static int numberOfChips() { return m_NUM_CHIPS; }
// MACHINE INSTANIATION CONFIG VARIABLES
// -------------------------------------
// L1 CACHE MACHINES
// defines the number of L1banks - idependent of ruby chips (power of 2)
// NOTE - no protocols currently supports L1s != processors, just a placeholder
- static int L1CacheBits() { return g_NUM_PROCESSORS_BITS; }
- static int numberOfL1Cache() { return g_NUM_PROCESSORS; }
- static int L1CachePerChipBits() { return procsPerChipBits() ; } // L1s != processors not currently supported
- static int numberOfL1CachePerChip() { return numberOfProcsPerChip(); } // L1s != processors not currently supported
- static int numberOfL1CachePerChip(NodeID myNodeID) { return numberOfL1CachePerChip(); }
- static int L1CacheTransitionsPerCycle() { return L1CACHE_TRANSITIONS_PER_RUBY_CYCLE; }
-
- // L2 CACHE MACHINES
- // defines the number of L2banks/L2Caches - idependent of ruby chips (power of 2)
- static int L2CacheBits() { return g_NUM_L2_BANKS_BITS; }
- static int numberOfL2Cache() { return g_NUM_L2_BANKS; }
- static int L1CacheNumToL2Base(NodeID L1RubyNodeID);
- static int L2CachePerChipBits() { return g_NUM_L2_BANKS_PER_CHIP_BITS; }
- static int numberOfL2CachePerChip() { return g_NUM_L2_BANKS_PER_CHIP; }
- static int numberOfL2CachePerChip(NodeID myNodeID) { return numberOfL2CachePerChip(); }
- static int L2CacheTransitionsPerCycle() { return L2CACHE_TRANSITIONS_PER_RUBY_CYCLE; }
// DIRECTORY/MEMORY MACHINES
// defines the number of ruby memories - independent of ruby chips (power of 2)
- static int memoryBits() { return g_NUM_MEMORIES_BITS; }
- static int numberOfDirectory() { return numberOfMemories(); }
- static int numberOfMemories() { return g_NUM_MEMORIES; }
- static int numberOfDirectoryPerChip() { return g_NUM_MEMORIES_PER_CHIP; }
- static int numberOfDirectoryPerChip(NodeID myNodeID) { return g_NUM_MEMORIES_PER_CHIP; }
- static int DirectoryTransitionsPerCycle() { return DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE; }
-
- // PERSISTENT ARBITER MACHINES
- static int numberOfPersistentArbiter() { return numberOfMemories(); }
- static int numberOfPersistentArbiterPerChip() {return numberOfDirectoryPerChip(); }
- static int numberOfPersistentArbiterPerChip(NodeID myNodeID) {return numberOfDirectoryPerChip(myNodeID); }
- static int PersistentArbiterTransitionsPerCycle() { return L2CACHE_TRANSITIONS_PER_RUBY_CYCLE; }
+ // static int memoryBits() { return m_NUM_MEMORIES_BITS; }
+ // static int numberOfDirectory() { return numberOfMemories(); }
+ // static int numberOfMemories() { return m_NUM_MEMORIES; }
+ // static int numberOfDirectoryPerChip() { return m_NUM_MEMORIES_PER_CHIP; }
+ // static int DirectoryTransitionsPerCycle() { return m_DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE; }
// ---- END MACHINE SPECIFIC VARIABLES ----
// VARIABLE MEMORY RESPONSE LATENCY
// *** NOTE *** This is where variation is added to the simulation
// see Alameldeen et al. HPCA 2003 for further details
- static int memoryResponseLatency() { return MEMORY_RESPONSE_LATENCY_MINUS_2+(random() % 5); }
+// static int getMemoryLatency() { return m_MEMORY_RESPONSE_LATENCY_MINUS_2+(random() % 5); }
+ static void reset();
static void init();
- static void printConfiguration(ostream& out);
+ static void printConfiguration(std::ostream& out);
// Memory Controller
- static int memBusCycleMultiplier () { return MEM_BUS_CYCLE_MULTIPLIER; }
- static int banksPerRank () { return BANKS_PER_RANK; }
- static int ranksPerDimm () { return RANKS_PER_DIMM; }
- static int dimmsPerChannel () { return DIMMS_PER_CHANNEL; }
- static int bankBit0 () { return BANK_BIT_0; }
- static int rankBit0 () { return RANK_BIT_0; }
- static int dimmBit0 () { return DIMM_BIT_0; }
- static int bankQueueSize () { return BANK_QUEUE_SIZE; }
- static int bankBusyTime () { return BANK_BUSY_TIME; }
- static int rankRankDelay () { return RANK_RANK_DELAY; }
- static int readWriteDelay () { return READ_WRITE_DELAY; }
- static int basicBusBusyTime () { return BASIC_BUS_BUSY_TIME; }
- static int memCtlLatency () { return MEM_CTL_LATENCY; }
- static int refreshPeriod () { return REFRESH_PERIOD; }
- static int tFaw () { return TFAW; }
- static int memRandomArbitrate () { return MEM_RANDOM_ARBITRATE; }
- static int memFixedDelay () { return MEM_FIXED_DELAY; }
+
+// static int memBusCycleMultiplier () { return m_MEM_BUS_CYCLE_MULTIPLIER; }
+/* static int banksPerRank () { return m_BANKS_PER_RANK; }
+ static int ranksPerDimm () { return m_RANKS_PER_DIMM; }
+ static int dimmsPerChannel () { return m_DIMMS_PER_CHANNEL; }
+ static int bankBit0 () { return m_BANK_BIT_0; }
+ static int rankBit0 () { return m_RANK_BIT_0; }
+ static int dimmBit0 () { return m_DIMM_BIT_0; }
+ static int bankQueueSize () { return m_BANK_QUEUE_SIZE; }
+ static int bankBusyTime () { return m_BankBusyTime; }
+ static int rankRankDelay () { return m_RANK_RANK_DELAY; }
+ static int readWriteDelay () { return m_READ_WRITE_DELAY; }
+ static int basicBusBusyTime () { return m_BASIC_BUS_BUSY_TIME; }
+ static int memCtlLatency () { return m_MEM_CTL_LATENCY; }
+ static int refreshPeriod () { return m_REFRESH_PERIOD; }
+ static int tFaw () { return m_TFAW; }
+ static int memRandomArbitrate () { return m_MEM_RANDOM_ARBITRATE; }
+ static int memFixedDelay () { return m_MEM_FIXED_DELAY; }
+*/
+ /* cache accessors */
+ static int getCacheIDFromParams(int level, int num, string split_type) {
+ // TODO: this function
+ return 0;
+ }
+
+#define accessor_true( TYPE, NAME )
+#define accessor_false( TYPE, NAME ) \
+ static TYPE get##NAME() { return m_##NAME; } \
+ static void set##NAME(TYPE val) { m_##NAME = val; }
+
+#define array_accessor_true( TYPE, NAME, DEFAULT_ARRAY_SIZE )
+#define array_accessor_false( TYPE, NAME, DEFAULT_ARRAY_SIZE ) \
+ static TYPE get##NAME(int idx) { \
+ assert(m_##NAME != NULL); \
+ return m_##NAME[idx]; \
+ } \
+ static void set##NAME(int idx, TYPE val) { \
+ if(m_##NAME == NULL) { \
+ assert(DEFAULT_ARRAY_SIZE > 0); \
+ m_##NAME = new TYPE[DEFAULT_ARRAY_SIZE]; \
+ } \
+ m_##NAME[idx] = val; \
+ }
+
+#define array2d_accessor_true( TYPE, NAME )
+#define array2d_accessor_false( TYPE, NAME ) \
+ static TYPE get##NAME(int idx1, int idx2) { return m_##NAME[idx1][idx2]; } \
+ static void set##NAME(int idx1, int idx2, TYPE val) { m_##NAME[idx1][idx2] = val; }
+
+#define array3d_accessor_true( TYPE, NAME )
+#define array3d_accessor_false( TYPE, NAME ) \
+ static TYPE get##NAME(int idx1, int idx2, int idx3) { return m_##NAME[idx1][idx2][idx3]; } \
+ static void set##NAME(int idx1, int idx2, int idx3, TYPE val) { m_##NAME[idx1][idx2][idx3] = val; }
+
+#define PARAM( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ accessor_##CUSTOM_ACCESSOR(int32,NAME)
+#define PARAM_UINT( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ accessor_##CUSTOM_ACCESSOR(uint32,NAME)
+#define PARAM_ULONG( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ accessor_##CUSTOM_ACCESSOR(uint64,NAME)
+#define PARAM_BOOL( NAME, DEFAULT_VALUE,CUSTOM_ACCESSOR ) \
+ accessor_##CUSTOM_ACCESSOR(bool,NAME)
+#define PARAM_DOUBLE( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ accessor_##CUSTOM_ACCESSOR(double,NAME)
+#define PARAM_STRING( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ accessor_##CUSTOM_ACCESSOR(const char*,NAME)
+#define PARAM_ARRAY( NAME, TYPE, DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ array_accessor_##CUSTOM_ACCESSOR(TYPE, NAME, DEFAULT_ARRAY_SIZE)
+#define PARAM_ARRAY2D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ array2d_accessor_##CUSTOM_ACCESSOR(TYPE, NAME)
+#define PARAM_ARRAY3D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, D3_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ array3d_accessor_##CUSTOM_ACCESSOR(TYPE, NAME)
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+#undef PARAM_ARRAY2D
+#undef PARAM_ARRAY3D
private:
+ static uint32 m_data_block_mask;
+
+#define PARAM( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static int32 m_##NAME;
+#define PARAM_UINT( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static uint32 m_##NAME;
+#define PARAM_ULONG( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static uint64 m_##NAME;
+#define PARAM_BOOL( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static bool m_##NAME;
+#define PARAM_DOUBLE( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static double m_##NAME;
+#define PARAM_STRING( NAME, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static const char *m_##NAME;
+#define PARAM_ARRAY( NAME, TYPE, DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static TYPE* m_##NAME;
+#define PARAM_ARRAY2D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static TYPE** m_##NAME;
+#define PARAM_ARRAY3D( NAME, TYPE, D1_DEFAULT_ARRAY_SIZE, D2_DEFAULT_ARRAY_SIZE, D3_DEFAULT_ARRAY_SIZE, DEFAULT_VALUE, CUSTOM_ACCESSOR ) \
+ static TYPE*** m_##NAME;
+#include CONFIG_VAR_FILENAME
+#undef PARAM
+#undef PARAM_UINT
+#undef PARAM_ULONG
+#undef PARAM_BOOL
+#undef PARAM_DOUBLE
+#undef PARAM_STRING
+#undef PARAM_ARRAY
+#undef PARAM_ARRAY2D
+#undef PARAM_ARRAY3D
+
};
#endif //RUBYCONFIG_H
diff --git a/src/mem/ruby/config/cfg.rb b/src/mem/ruby/config/cfg.rb
new file mode 100644
index 000000000..de8bcafd2
--- /dev/null
+++ b/src/mem/ruby/config/cfg.rb
@@ -0,0 +1,751 @@
+#!/usr/bin/ruby
+
+class AssertionFailure < RuntimeError
+end
+
+class Boolean
+ def self.is_a?(obj)
+ return self.name == "Boolean"
+ end
+end
+
+def assert(condition,message)
+ unless condition
+ raise AssertionFailure, "Assertion failed: #{message}"
+ end
+end
+
+class LibRubyObject
+ @@all_objs = Array.new
+ attr_reader :obj_name
+ @@default_params = Hash.new
+
+ def initialize(obj_name)
+ assert obj_name.is_a?(String), "Obj_Name must be a string"
+ @obj_name = obj_name
+ @@all_objs << self
+ @params = Hash.new
+ end
+
+ def cppClassName()
+ raise NotImplementedException
+ end
+
+ def self.param(param_name, type)
+ idx = self.name.to_sym
+ @@default_params[idx] = Hash.new if ! @@default_params.key?(idx)
+ @@default_params[idx][param_name] = nil
+ send :define_method, param_name do
+ @params[param_name] = @@default_params[idx][param_name] if ! @params.key?(param_name)
+ @params[param_name]
+ end
+ method_name = (param_name.to_s + "=").to_sym
+ send :define_method, method_name do |val|
+ if val.is_a?(FalseClass) || val.is_a?(TrueClass)
+ assert type.is_a?(Boolean), "default value of param \"#{param_name}\" must be either true or false"
+ else
+ assert val.is_a?(type), "default value of param \"#{param_name}\" does not match type #{type}"
+ end
+# assert val.is_a?(type), "#{param_name} must be of type #{type}"
+ @params[param_name] = val
+ end
+ end
+
+ def self.default_param(param_name, type, default)
+ idx = self.name.to_sym
+ @@default_params[idx] = Hash.new if ! @@default_params.key?(idx)
+ if default.is_a?(FalseClass) || default.is_a?(TrueClass)
+ assert type.is_a?(Boolean), "default value of param \"#{param_name}\" must be either true or false"
+ else
+ assert default.is_a?(type), "default value of param \"#{param_name}\" does not match type #{type}"
+ end
+ @@default_params[idx][param_name] = default
+ send :define_method, param_name do
+ @params[param_name] = @@default_params[idx][param_name] if ! @params.key?(param_name)
+ @params[param_name]
+ end
+ method_name = (param_name.to_s + "=").to_sym
+ send :define_method, method_name do |val|
+      if val.is_a?(FalseClass) || val.is_a?(TrueClass)
+        assert type.is_a?(Boolean), "#{param_name} must be either true or false"
+      else
+        assert val.is_a?(type), "#{param_name} must be of type #{type}"
+      end
+ @params[param_name] = val
+ end
+ end
+
+ def applyDefaults()
+ idx = self.class.name.to_sym
+ @@default_params[idx] = Hash.new if ! @@default_params.key?(idx)
+ @@default_params[idx].each { |key, val|
+ @params[key] = val if ! @params.key?(key)
+ }
+ end
+
+ def argv()
+ str = ""
+
+ applyDefaults
+
+ @params.each { |key, val|
+ str += key.id2name + " "
+ if val.is_a?(LibRubyObject)
+ str += val.obj_name + " "
+ else
+ if val.is_a?(String) and val == ""
+ str += "null "
+ else
+ str += val.to_s + " "
+ end
+ end
+ }
+ return str
+ end
+
+ def self.printConstructors()
+ @@all_objs.each { |obj|
+ print obj.cppClassName, " ", obj.obj_name, " ",obj.argv,"\n"
+ }
+ end
+ def self.all()
+ @@all_objs
+ end
+end
+
+class IfacePort < LibRubyObject
+ def initialize(obj_name)
+ super(obj_name)
+ end
+
+ def bochsConnType
+ raise NotImplementedException
+ end
+end
+
+class NetPort < LibRubyObject
+ attr :mach_type
+ attr_reader :version
+
+ @@type_cnt = Hash.new
+ @type_id
+ def initialize(obj_name, mach_type)
+ super(obj_name)
+ @mach_type = mach_type
+ @@type_cnt[mach_type] ||= 0
+ @type_id = @@type_cnt[mach_type]
+ @@type_cnt[mach_type] += 1
+
+ idx = "NetPort".to_sym
+ @@default_params[idx] = Hash.new if ! @@default_params.key?(idx)
+ @@default_params[idx].each { |key, val|
+ @params[key] = val if ! @params.key?(key)
+ }
+ end
+
+ def port_name
+ mach_type
+ end
+ def port_num
+ @type_id
+ end
+ def cppClassName
+ "NetPort"
+ end
+end
+
+class MemoryVector < LibRubyObject
+ def initialize(obj_name)
+ super(obj_name)
+ end
+
+ def cppClassName
+ "MemoryController"
+ end
+end
+
+class Debug < LibRubyObject
+ def initialize *args
+ case args.size
+ when 1
+ super(args[0])
+    when 6
+      super(args[0])
+      init_params(*args[1..5])
+ else
+ raise Exception
+ end
+ end
+
+ def init_params (protocol_trace, filter_string, verbosity_string, start_time, output_filename)
+ @params[:protocol_trace] = protocol_trace
+ @params[:filter_string] = filter_string
+ @params[:verbosity_string] = verbosity_string
+ @params[:start_time] = start_time
+ @params[:output_filename] = output_filename
+ end
+
+ def cppClassName
+ "Debug"
+ end
+end
+
+class RubySystem
+
+ @@params = Hash.new
+ @@network = nil
+
+ def self.init(iface_ports, network)
+ @@iface_ports = iface_ports
+ @@network = network
+ end
+
+ def self.default_param(param_name, type, default)
+ if default.is_a?(FalseClass) || default.is_a?(TrueClass)
+ assert type.is_a?(Boolean), "default value of param \"#{param_name}\" must be either true or false"
+ else
+ assert default.is_a?(type), "default value of param \"#{param_name}\" does not match type #{type}"
+ end
+ @@params[param_name] = default
+ method_name = (param_name.to_s).to_sym
+ instance_eval <<-EOS
+ def #{method_name.to_s}
+ @@params[:#{param_name.to_s}]
+ end
+ EOS
+ instance_eval <<-EOS
+ def #{method_name.to_s}=(val)
+ @@params[:#{param_name.to_s}] = val
+ end
+ EOS
+ end
+
+ def self.generateConfig()
+ # get current time for random seed if set to "rand"
+ if @@params[:random_seed] == "rand"
+ t = Time.now
+ @@params[:random_seed] = t.usec.to_i
+ end
+ if ! @@params[:random_seed].is_a?(Integer)
+ raise TypeException
+ end
+ print "System sys0 ",argv,"\n"
+ LibRubyObject.all.each { |obj|
+ if obj.is_a?(SetAssociativeCache)
+ obj.calculateLatency
+ end
+ }
+ LibRubyObject.printConstructors
+ end
+
+ def self.printIfacePorts()
+ @@iface_ports.each { |port|
+ print port.obj_name, " "
+ }
+ puts
+ end
+
+ def self.getBochsConnections()
+ ports = Hash.new
+ @@iface_ports.each { |port|
+ ports[port.obj_name] = port.bochsConnType
+ }
+ return ports
+ end
+
+ def self.getMemorySizeMB()
+ DirectoryMemory.memorySizeMB
+ end
+
+ # override the default accessors (generated by default_param) for random_seed
+ def self.random_seed=(seed)
+    assert (seed.is_a?(Integer) or seed == "rand"), "RubySystem.random_seed takes either an integer value or the string \"rand\""
+ @@params[:random_seed] = seed
+ end
+
+private
+
+ def self.argv()
+ str = ""
+ @@params.each { |key, val|
+ str += key.id2name + " "
+ str += val.to_s + " "
+ }
+ return str
+ end
+
+ def self.writeConfig()
+ @@network.printTopology
+ end
+
+end
+
+#require "defaults.rb"
+
+
+
+class CacheController < NetPort
+ @@total_cache_controllers = 0
+ attr :caches
+ attr :sequencer
+ def initialize(obj_name, mach_type, caches, sequencer)
+ super(obj_name, mach_type)
+ @caches = caches
+ @caches.each { |cache|
+ cache.controller = self
+ }
+
+ @sequencer = sequencer
+ @sequencer.controller = self
+
+ @version = @@total_cache_controllers
+ @@total_cache_controllers += 1
+ @sequencer.version = @version
+ buffer_size()
+ end
+
+ def argv()
+ vec = "version "+@version.to_s
+ @caches.each { |cache|
+ vec += " cache " + cache.obj_name
+ }
+ vec += " sequencer "+@sequencer.obj_name
+ vec += " transitions_per_cycle "+@params[:transitions_per_cycle].to_s
+ vec += " buffer_size "+@params[:buffer_size].to_s
+ vec += " number_of_TBEs "+@params[:number_of_TBEs].to_s
+
+ end
+
+ def cppClassName()
+ "generated:"+@mach_type
+ end
+end
+
+class DirectoryController < NetPort
+ @@total_directory_controllers = 0
+ attr :directory
+ attr :memory_control
+
+ def initialize(obj_name, mach_type, directory, memory_control)
+ super(obj_name, mach_type)
+
+ @directory = directory
+ directory.controller = self
+
+ @memory_control = memory_control
+
+ @version = @@total_directory_controllers
+ @@total_directory_controllers += 1
+ buffer_size()
+ end
+
+ def argv()
+ "version "+@version.to_s+" directory_name "+@directory.obj_name+" transitions_per_cycle "+@params[:transitions_per_cycle].to_s + " buffer_size "+@params[:buffer_size].to_s + " number_of_TBEs "+@params[:number_of_TBEs].to_s + " memory_controller_name "+@memory_control.obj_name + " recycle_latency "+@params[:recycle_latency].to_s
+ end
+
+ def cppClassName()
+ "generated:"+@mach_type
+ end
+
+end
+
+class DMAController < NetPort
+ @@total_dma_controllers = 0
+ attr :dma_sequencer
+ def initialize(obj_name, mach_type, dma_sequencer)
+ super(obj_name, mach_type)
+ @dma_sequencer = dma_sequencer
+ @version = @@total_dma_controllers
+ @@total_dma_controllers += 1
+ dma_sequencer.controller = self
+ buffer_size
+ end
+
+ def argv()
+ "version "+@version.to_s+" dma_sequencer "+@dma_sequencer.obj_name+" transitions_per_cycle "+@params[:transitions_per_cycle].to_s + " buffer_size "+@params[:buffer_size].to_s + " number_of_TBEs "+@params[:number_of_TBEs].to_s
+ end
+
+ def cppClassName()
+ "generated:"+@mach_type
+ end
+end
+
+class Cache < LibRubyObject
+ attr :size_kb, :latency
+ attr_writer :controller
+ def initialize(obj_name, size_kb, latency)
+ super(obj_name)
+ assert size_kb.is_a?(Integer), "Cache size must be an integer"
+ @size_kb = size_kb
+ @latency = latency
+ end
+
+ def args
+ "controller "+@controller.obj_name+" size_kb "+@size_kb.to_s+" latency "+@latency.to_s
+ end
+end
+
+class SetAssociativeCache < Cache
+ attr :assoc, :replacement_policy
+
+ # latency can be either an integer, a float, or the string "auto"
+ # when an integer, it represents the number of cycles for a hit
+ # when a float, it represents the cache access time in ns
+ # when set to "auto", libruby will attempt to find a realistic latency by running CACTI
+ def initialize(obj_name, size_kb, latency, assoc, replacement_policy)
+ super(obj_name, size_kb, latency)
+ @assoc = assoc
+ @replacement_policy = replacement_policy
+ end
+
+ def calculateLatency()
+ if @latency == "auto"
+ cacti_args = Array.new()
+ cacti_args << (@size_kb*1024) << RubySystem.block_size_bytes << @assoc
+ cacti_args << 1 << 0 << 0 << 0 << 1
+ cacti_args << RubySystem.tech_nm << RubySystem.block_size_bytes*8
+ cacti_args << 0 << 0 << 0 << 1 << 0 << 0 << 0 << 0 << 1
+ cacti_args << 360 << 0 << 0 << 0 << 0 << 1 << 1 << 1 << 1 << 0 << 0
+ cacti_args << 50 << 10 << 10 << 0 << 1 << 1
+
+ cacti_cmd = File.dirname(__FILE__) + "/cacti/cacti " + cacti_args.join(" ")
+
+ IO.popen(cacti_cmd) { |pipe|
+ str1 = pipe.readline
+ str2 = pipe.readline
+ results = str2.split(", ")
+ if results.size != 61
+ print "CACTI ERROR: CACTI produced unexpected output.\n"
+ print "Are you using the version shipped with libruby?\n"
+ raise Exception
+ end
+ latency_ns = results[5].to_f
+        if (latency_ns == 1e+39)  # CACTI reports 1e+39 when it cannot model the cache
+ print "CACTI ERROR: CACTI was unable to realistically model the cache ",@obj_name,"\n"
+ print "Either change the cache parameters or manually set the latency values\n"
+ raise Exception
+ end
+ clk_period_ns = 1e9 * (1.0 / (RubySystem.freq_mhz * 1e6))
+ latency_cycles = (latency_ns / clk_period_ns).ceil
+ @latency = latency_cycles
+ }
+ elsif @latency.is_a?(Float)
+ clk_period_ns = 1e9 * (1.0 / (RubySystem.freq_mhz * 1e6))
+ latency_cycles = (@latency / clk_period_ns).ceil
+ @latency = latency_cycles
+ elsif ! @latency.is_a?(Integer)
+ raise Exception
+ end
+ end
+
+ def argv()
+ args+" assoc "+@assoc.to_s+" replacement_policy "+@replacement_policy
+ end
+
+ def cppClassName()
+ "SetAssociativeCache"
+ end
+end
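The latency comment above admits three forms; the following is a minimal sketch of each, with hypothetical object names and sizes, per calculateLatency above (an Integer is taken as a hit latency in cycles, a Float as an access time in ns, and "auto" shells out to the bundled CACTI binary):

    # Integer latency: used directly as a hit latency in cycles.
    l1i = SetAssociativeCache.new("l1i_0", 32, 1, 2, "PSEUDO_LRU")

    # Float latency: treated as an access time in ns and converted to cycles
    # with ceil(latency_ns / clk_period_ns), using RubySystem.freq_mhz.
    l1d = SetAssociativeCache.new("l1d_0", 32, 0.8, 2, "PSEUDO_LRU")

    # "auto": calculateLatency runs CACTI and derives a cycle count from the
    # access time it reports.
    l2  = SetAssociativeCache.new("l2_0", 2048, "auto", 8, "PSEUDO_LRU")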
+
+class DirectoryMemory < LibRubyObject
+ attr :size_mb
+ attr_writer :controller
+ @@total_size_mb = 0
+
+ def initialize(obj_name, size_mb)
+ super(obj_name)
+ @size_mb = size_mb
+ @@total_size_mb += size_mb
+ end
+
+ def argv()
+ "version "+@controller.version.to_s+" size_mb "+@size_mb.to_s+" controller "+@controller.obj_name
+ end
+
+ def cppClassName()
+ "DirectoryMemory"
+ end
+
+ def self.memorySizeMB()
+ @@total_size_mb
+ end
+end
+
+#added by SS
+class MemoryControl < LibRubyObject
+ attr :name
+ def initialize(obj_name)
+ super(obj_name)
+ @name = obj_name
+ end
+
+ def argv()
+ vec = super()
+ vec += " mem_bus_cycle_multiplier "+mem_bus_cycle_multiplier.to_s
+ vec += " banks_per_rank "+banks_per_rank.to_s
+ vec += " ranks_per_dimm "+ranks_per_dimm.to_s
+ vec += " dimms_per_channel "+dimms_per_channel.to_s
+ vec += " bank_bit_0 "+bank_bit_0.to_s
+ vec += " rank_bit_0 "+rank_bit_0.to_s
+ vec += " dimm_bit_0 "+dimm_bit_0.to_s
+ vec += " bank_queue_size "+bank_queue_size.to_s
+ vec += " bank_busy_time "+bank_busy_time.to_s
+ vec += " rank_rank_delay "+rank_rank_delay.to_s
+ vec += " read_write_delay "+read_write_delay.to_s
+ vec += " basic_bus_busy_time "+basic_bus_busy_time.to_s
+ vec += " mem_ctl_latency "+mem_ctl_latency.to_s
+ vec += " refresh_period "+refresh_period.to_s
+ vec += " tFaw "+tFaw.to_s
+ vec += " mem_random_arbitrate "+mem_random_arbitrate.to_s
+ vec += " mem_fixed_delay "+mem_fixed_delay.to_s
+ vec += " memory_controller_name "+@name
+
+ end
+
+
+ def cppClassName()
+ "MemoryControl"
+ end
+end
+
+
+
+class Sequencer < IfacePort
+
+ def cppClassName()
+ "Sequencer"
+ end
+
+ param :controller, NetPort # must be set after initialization
+ param :icache, Cache
+ param :dcache, Cache
+ param :version, Integer
+
+ def initialize(obj_name, icache, dcache)
+ super(obj_name)
+ self.icache=icache
+ self.dcache=dcache
+ end
+
+ def bochsConnType()
+ return "cpu"+version.to_s
+ end
+
+end
+
+
+
+class DMASequencer < IfacePort
+ def initialize(obj_name)
+ super(obj_name)
+ @params = {
+ :controller => nil,
+ :version => nil
+ }
+ end
+
+ def controller=(controller)
+ @params[:controller] = controller.obj_name
+ @params[:version] = controller.version
+ end
+
+ def cppClassName()
+ "DMASequencer"
+ end
+
+ def bochsConnType()
+ return "dma"+@params[:version].to_s
+ end
+end
+
+class IntNode
+ @@num = 0
+ def initialize()
+
+ end
+end
+
+class Network < LibRubyObject
+end
+
+class Topology < LibRubyObject
+ attr :net_ports
+ param :network, Network
+ def initialize(name, net_ports)
+ super(name)
+ @net_ports = net_ports
+ end
+
+ def cppClassName
+ "Topology"
+ end
+end
+
+class Network < LibRubyObject
+ param :topology, Topology
+ def initialize(name, topo)
+ super(name)
+ @params[:topology] = topo
+ topo.network= self
+ end
+
+ def argv()
+ vec = super()
+
+ vec += " endpoint_bandwidth "+endpoint_bandwidth.to_s
+ vec += " adaptive_routing "+adaptive_routing.to_s
+ vec += " number_of_virtual_networks "+number_of_virtual_networks.to_s
+ vec += " fan_out_degree "+fan_out_degree.to_s
+
+ vec += " buffer_size "+buffer_size.to_s
+ vec += " link_latency "+adaptive_routing.to_s
+ vec += " on_chip_latency "+on_chip_latency.to_s
+
+ end
+
+ def printTopology()
+ topology.printFile
+ end
+ def cppClassName()
+ "SimpleNetwork"
+ end
+
+end
+
+class PtToPtTopology < Topology
+
+ param :connections,String
+
+ def initialize(name, net_ports)
+ super(name, net_ports)
+ @params[:connections] = ""
+ @net_ports.each_index { |idx|
+ @params[:connections] << ("ext_node:"+@net_ports[idx].port_name+":"+@net_ports[idx].port_num.to_s)
+ @params[:connections] << ("%int_node:"+ idx.to_s+ "%link_latency:"+ link_latency.to_s)
+ @params[:connections] << ("%bw_multiplier:"+external_bw.to_s+"#")
+ }
+ @net_ports.each_index { |outer_idx|
+ @net_ports.each_index { |inner_idx|
+ if (outer_idx != inner_idx)
+ @params[:connections] << ("int_node:"+ outer_idx.to_s+ "%int_node:"+ inner_idx.to_s)
+ @params[:connections] << ("%link_latency:"+link_latency.to_s+"%bw_multiplier:"+internal_bw.to_s)
+ @params[:connections] << ("%link_weight:"+1.to_s+"#")
+ end
+ }
+ }
+ # call the accessors of the parent class to initialize them
+ # need to find a better method!!
+ print_config
+ end
+
+end
+
+class CrossbarTopology < Topology
+ param :connections,String
+
+ def initialize(name, net_ports)
+ super(name, net_ports)
+ @params[:connections] = ""
+ crossbar_node = @net_ports.size
+ @net_ports.each_index { |idx|
+ @params[:connections] << ("ext_node:"+@net_ports[idx].port_name+":"+@net_ports[idx].port_num.to_s)
+ @params[:connections] << ("%int_node:"+ idx.to_s+ "%link_latency:"+ link_latency.to_s)
+ @params[:connections] << ("%bw_multiplier:"+external_bw.to_s+"#")
+ }
+ @net_ports.each_index { |idx|
+ @params[:connections] << ("int_node:"+idx.to_s+"%int_node:"+crossbar_node.to_s)
+ @params[:connections] << ("%link_latency:"+link_latency.to_s+"%bw_multiplier:"+internal_bw.to_s)
+ @params[:connections] << ("%link_weight:"+1.to_s+"#")
+ }
+ print_config
+ end
+end
+
+#added by SS
+class Tracer < LibRubyObject
+ def initialize(obj_name)
+ super(obj_name)
+ end
+
+ def cppClassName()
+ "Tracer"
+ end
+
+end
+
+class Profiler < LibRubyObject
+ def initialize(obj_name)
+ super(obj_name)
+ end
+
+ def cppClassName()
+ "Profiler"
+ end
+
+end
+
+class MI_example_CacheController < CacheController
+ def initialize(obj_name, mach_type, caches, sequencer)
+ super(obj_name, mach_type, caches, sequencer)
+ end
+ def argv()
+ vec = super()
+ vec += " issue_latency "+issue_latency.to_s
+ vec += " cache_response_latency "+cache_response_latency.to_s
+ end
+
+end
+
+class MI_example_DirectoryController < DirectoryController
+ def initialize(obj_name, mach_type, directory, memory_control)
+ super(obj_name, mach_type, directory, memory_control)
+ end
+ def argv()
+ vec = super()
+ vec += " to_mem_ctrl_latency "+to_mem_ctrl_latency.to_s
+ vec += " directory_latency "+directory_latency.to_s
+ vec += " memory_latency "+memory_latency.to_s
+ end
+
+end
+
+#added by SS
+class GarnetNetwork < Network
+ def initialize(name, topo)
+ super(name, topo)
+ end
+ def argv()
+ vec = super()
+ vec += " flit_size "+flit_size.to_s
+ vec += " number_of_pipe_stages "+number_of_pipe_stages.to_s
+ vec += " vcs_per_class "+vcs_per_class.to_s
+ vec += " buffer_size "+buffer_size.to_s
+ vec += " using_network_testing "+using_network_testing.to_s
+ end
+
+end
+
+class GarnetFixedPipeline < GarnetNetwork
+ def initialize(name, net_ports)
+ super(name, net_ports)
+ end
+
+ def argv()
+ super()
+ end
+
+ def cppClassName()
+ "GarnetNetwork_d"
+ end
+end
+
+class GarnetFlexiblePipeline < GarnetNetwork
+ def initialize(name, net_ports)
+ super(name, net_ports)
+ end
+
+ def argv()
+ super()
+ end
+
+ def cppClassName()
+ "GarnetNetwork"
+ end
+end
+
+require "defaults.rb"
diff --git a/src/mem/ruby/config/config.hh b/src/mem/ruby/config/config.hh
index 3cad258a2..ad91cd73d 100644
--- a/src/mem/ruby/config/config.hh
+++ b/src/mem/ruby/config/config.hh
@@ -1,74 +1,23 @@
-//
-// This file has been modified by Kevin Moore and Dan Nussbaum of the
-// Scalable Systems Research Group at Sun Microsystems Laboratories
-// (http://research.sun.com/scalable/) to support the Adaptive
-// Transactional Memory Test Platform (ATMTP). For information about
-// ATMTP, see the GEMS website: http://www.cs.wisc.edu/gems/.
-//
-// Please send email to atmtp-interest@sun.com with feedback, questions, or
-// to request future announcements about ATMTP.
-//
-// ----------------------------------------------------------------------
-//
-// File modification date: 2008-02-23
-//
-// ----------------------------------------------------------------------
-//
-// ATMTP is distributed as part of the GEMS software toolset and is
-// available for use and modification under the terms of version 2 of the
-// GNU General Public License. The GNU General Public License is contained
-// in the file $GEMS/LICENSE.
-//
-// Multifacet GEMS is free software; you can redistribute it and/or modify
-// it under the terms of version 2 of the GNU General Public License as
-// published by the Free Software Foundation.
-//
-// Multifacet GEMS is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with the Multifacet GEMS; if not, write to the Free Software Foundation,
-// Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
-//
-// ----------------------------------------------------------------------
-//
-
-// see rubyconfig.defaults for some explanations
-
-PARAM( g_RANDOM_SEED );
-
-// Maximum number of cycles a request is can be outstanding before the
-// Sequencer of StoreBuffer declares we're in deadlock/livelock
-PARAM( g_DEADLOCK_THRESHOLD );
-PARAM_BOOL( RANDOMIZATION );
-PARAM_BOOL( g_SYNTHETIC_DRIVER );
-PARAM_BOOL( g_DETERMINISTIC_DRIVER );
// FOR MOESI_CMP_token
-PARAM_BOOL( g_FILTERING_ENABLED );
-PARAM_BOOL( g_DISTRIBUTED_PERSISTENT_ENABLED );
-PARAM_BOOL( g_DYNAMIC_TIMEOUT_ENABLED );
-PARAM( g_RETRY_THRESHOLD );
-PARAM( g_FIXED_TIMEOUT_LATENCY );
-
-PARAM( g_trace_warmup_length );
-PARAM_DOUBLE( g_bash_bandwidth_adaptive_threshold );
+//PARAM_BOOL( FilteringEnabled, false, false );
+//PARAM_BOOL( DistributedPersistentEnabled, true, false );
+//PARAM_BOOL( DynamicTimeoutEnabled, true, false );
+//PARAM( RetryThreshold, 1, false );
+//PARAM( FixedTimeoutLatency, 300, false );
-PARAM( g_tester_length );
-PARAM( g_synthetic_locks );
-PARAM( g_deterministic_addrs );
-// Specified Generator: See SpecifiedGeneratorType in external.sm for valid values
-PARAM_STRING( g_SpecifiedGenerator );
-PARAM( g_callback_counter );
-PARAM( g_NUM_COMPLETIONS_BEFORE_PASS );
+//PARAM( TraceWarmupLength, 1000000, false );
-PARAM( g_NUM_SMT_THREADS );
+//PARAM( callback_counter, 0, false );
+//PARAM( NUM_COMPLETIONS_BEFORE_PASS, 0, false );
-PARAM( g_think_time );
-PARAM( g_hold_time );
-PARAM( g_wait_time );
+//PARAM( tester_length, 0, false );
+//PARAM( synthetic_locks, 2048, false );
+//PARAM( think_time, 5, false );
+//PARAM( wait_time, 5, false );
+//PARAM( hold_time, 5, false );
+//PARAM( deterministic_addrs, 1, false );
+//PARAM_STRING( SpecifiedGenerator, "DetermInvGenerator", false );
// For debugging purposes, one can enable a trace of all the protocol
// state machine changes. Unfortunately, the code to generate the
@@ -80,243 +29,208 @@ PARAM( g_wait_time );
// "g_debug_ptr->setDebugTime(1)" to beging the following to set the
// debug begin time
//
-// this use to be ruby/common/Global.h
+// this used to be ruby/common/Global.hh
-PARAM_BOOL( PROTOCOL_DEBUG_TRACE );
-// a string for filtering debugging output (for all g_debug vars see Debug.h)
-PARAM_STRING( DEBUG_FILTER_STRING );
+//PARAM_BOOL( ProtocolDebugTrace, true, false );
+// a string for filtering debugging output (for all g_debug vars see Debug.hh)
+//PARAM_STRING( DEBUG_FILTER_STRING, "", false );
// filters debugging messages based on priority (low, med, high)
-PARAM_STRING( DEBUG_VERBOSITY_STRING );
+//PARAM_STRING( DEBUG_VERBOSITY_STRING, "", false );
// filters debugging messages based on a ruby time
-PARAM_ULONG( DEBUG_START_TIME );
+//PARAM_ULONG( DEBUG_START_TIME, 0, false );
// sends debugging messages to an output filename
-PARAM_STRING( DEBUG_OUTPUT_FILENAME );
-
-// defines relative (integer) clock multipliers between ruby, opal, and simics
-PARAM( SIMICS_RUBY_MULTIPLIER );
-PARAM( OPAL_RUBY_MULTIPLIER );
+//PARAM_STRING( DEBUG_OUTPUT_FILENAME, "", false );
-PARAM_BOOL( TRANSACTION_TRACE_ENABLED );
-PARAM_BOOL( USER_MODE_DATA_ONLY );
-PARAM_BOOL( PROFILE_HOT_LINES );
+//PARAM_BOOL( ProfileHotLines, false, false );
// PROFILE_ALL_INSTRUCTIONS is used if you want Ruby to profile all instructions executed
// The following need to be true for this to work correctly:
// 1. Disable istc and dstc for this simulation run
// 2. Add the following line to the object "sim" in the checkpoint you run from:
-// instruction_profile_line_size: 4
+// instruction_profile_line_size: 4
// This is used to have simics report back all instruction requests
// For more details on how to interpret the output physical instruction
// address, please read the document in the simics-howto directory
-PARAM_BOOL( PROFILE_ALL_INSTRUCTIONS );
+//PARAM_BOOL( ProfileAllInstructions, false, false );
// Set the following variable to true if you want a complete trace of
// PCs (physical address of program counters, with executing processor IDs)
// to be printed to stdout. Make sure to direct the simics output to a file.
// Otherwise, the run will take a really long time!
// A long run may write a file that can exceed the OS limit on file length
-PARAM_BOOL( PRINT_INSTRUCTION_TRACE );
-PARAM( g_DEBUG_CYCLE );
-
-// Don't allow any datablocks to enter the STC
-PARAM_BOOL( BLOCK_STC );
+//PARAM_BOOL( PRINT_INSTRUCTION_TRACE, false, false );
+//PARAM( DEBUG_CYCLE, 0, false );
// Make the entire memory system perfect
-PARAM_BOOL( PERFECT_MEMORY_SYSTEM );
-PARAM( PERFECT_MEMORY_SYSTEM_LATENCY );
-
-PARAM_BOOL( DATA_BLOCK ); // Define NO_DATA_BLOCK to make the DataBlock take zero space
-
-PARAM_BOOL( REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH );
+//PARAM_BOOL( PERFECT_MEMORY_SYSTEM, false, false );
+//PARAM( PERFECT_MEMORY_SYSTEM_LATENCY, 0, false );
// *********************************************
-// CACHE & MEMORY PARAMETERS
+// SYSTEM PARAMETERS
// *********************************************
+//PARAM( NumberOfChips, 1, false );
+//PARAM( NumberOfCores, 2, false );
+//PARAM_ARRAY( NumberOfCoresPerChip, int, m_NumberOfChips, 2, false);
-PARAM( L1_CACHE_ASSOC );
-PARAM( L1_CACHE_NUM_SETS_BITS );
-PARAM( L2_CACHE_ASSOC );
-PARAM( L2_CACHE_NUM_SETS_BITS );
-
-PARAM_ULONG( g_MEMORY_SIZE_BYTES );
-PARAM( g_DATA_BLOCK_BYTES );
-// The following page size parameter is used by the stride prefetcher
-PARAM( g_PAGE_SIZE_BYTES );
-PARAM_STRING( g_REPLACEMENT_POLICY );
+// *********************************************
+// CACHE PARAMETERS
+// *********************************************
-PARAM( g_NUM_PROCESSORS );
-PARAM( g_NUM_L2_BANKS );
-PARAM( g_NUM_MEMORIES );
-PARAM( g_PROCS_PER_CHIP );
+//PARAM( NumberOfCaches, m_NumberOfCores, false );
+//PARAM( NumberOfCacheLevels, 1, false );
+/* this returns the number of discrete CacheMemories per level (i.e. a split L1 counts for 2) */
+//PARAM_ARRAY( NumberOfCachesPerLevel, int, m_NumberOfCacheLevels, m_NumberOfCores, false ); // this is the number of discrete caches if the level is private
+ // or the number of banks if the level is shared
+//PARAM( CacheIDFromParams, 1, true ); // returns a unique CacheID from the parameters (level, num, split_type)
+//PARAM_ARRAY( CacheLatency, int, m_NumberOfCaches, 1, false ); // returns the latency for cache, indexed by CacheID
+//PARAM_ARRAY( CacheSplitType, string, m_NumberOfCaches, "unified", false ); // returns "data", "instruction", or "unified", indexed by CacheID
+//PARAM_ARRAY( CacheType, string, m_NumberOfCaches, "SetAssociative", false ); // returns the type of a cache, indexed by CacheID
+//PARAM_ARRAY( CacheAssoc, int, m_NumberOfCaches, 4, false ); // returns the cache associativity, indexed by CacheID
+//PARAM_ARRAY( NumberOfCacheSets, int, m_NumberOfCaches, 256, false ); // returns the number of cache sets, indexed by CacheID
+//PARAM_ARRAY( NumberOfCacheSetBits, int, m_NumberOfCaches, log_int(256), false ); // returns the number of cache set bits, indexed by CacheID
+//PARAM_ARRAY( CacheReplacementPolicy, string, m_NumberOfCaches, "PSEUDO_LRU", false ); // other option is "LRU"
+
+//PARAM( DataBlockBytes, 64, false );
+//PARAM( DataBlockBits, log_int(m_DataBlockBytes), false);
+
+// ********************************************
+// MEMORY PARAMETERS
+// ********************************************
+
+//PARAM_ARRAY( NumberOfControllersPerType, int, m_NumberOfCacheLevels+2, m_NumberOfCores, false);
+//PARAM_ARRAY2D( NumberOfControllersPerTypePerChip, int, m_NumberOfCacheLevels+2, m_NumberOfChips, m_NumberOfCores, false);
+
+// ********************************************
+// DMA CONTROLLER PARAMETERS
+// ********************************************
+
+//PARAM( NumberOfDMA, 1, false );
+//PARAM_ARRAY( NumberOfDMAPerChip, int, m_NumberOfChips, 1, false);
+//PARAM_ARRAY( ChipNumFromDMAVersion, int, m_NumberOfDMA, 0, false );
+
+//PARAM_ULONG( MemorySizeBytes, 4294967296, false );
+//PARAM_ULONG( MemorySizeBits, 32, false);
+
+//PARAM( NUM_PROCESSORS, 0, false );
+//PARAM( NUM_L2_BANKS, 0, false );
+//PARAM( NUM_MEMORIES, 0, false );
+//PARAM( ProcsPerChip, 1, false );
// The following group of parameters are calculated. They must
// _always_ be left at zero.
-PARAM( g_NUM_CHIPS );
-PARAM( g_NUM_CHIP_BITS );
-PARAM( g_MEMORY_SIZE_BITS );
-PARAM( g_DATA_BLOCK_BITS );
-PARAM( g_PAGE_SIZE_BITS );
-PARAM( g_NUM_PROCESSORS_BITS );
-PARAM( g_PROCS_PER_CHIP_BITS );
-PARAM( g_NUM_L2_BANKS_BITS );
-PARAM( g_NUM_L2_BANKS_PER_CHIP_BITS );
-PARAM( g_NUM_L2_BANKS_PER_CHIP );
-PARAM( g_NUM_MEMORIES_BITS );
-PARAM( g_NUM_MEMORIES_PER_CHIP );
-PARAM( g_MEMORY_MODULE_BITS );
-PARAM_ULONG( g_MEMORY_MODULE_BLOCKS );
-
-// determines the mapping between L2 banks and sets within L2 banks
-PARAM_BOOL( MAP_L2BANKS_TO_LOWEST_BITS );
+//PARAM( NUM_CHIPS, 0, false );
+//PARAM( NUM_CHIP_BITS, 0, false );
+//PARAM( MEMORY_SIZE_BITS, 0, false );
+//PARAM( DATA_BLOCK_BITS, 0, false );
+//PARAM( PAGE_SIZE_BITS, 0, false );
+//PARAM( NUM_PROCESSORS_BITS, 0, false );
+//PARAM( PROCS_PER_CHIP_BITS, 0, false );
+//PARAM( NUM_L2_BANKS_BITS, 0, false );
+//PARAM( NUM_L2_BANKS_PER_CHIP_BITS, 0, false );
+//PARAM( NUM_L2_BANKS_PER_CHIP, 0, false );
+//PARAM( NUM_MEMORIES_BITS, 0, false );
+//PARAM( NUM_MEMORIES_PER_CHIP, 0, false );
+//PARAM( MEMORY_MODULE_BITS, 0, false );
+//PARAM_ULONG( MEMORY_MODULE_BLOCKS, 0, false );
// TIMING PARAMETERS
-PARAM( DIRECTORY_CACHE_LATENCY );
-
-PARAM( NULL_LATENCY );
-PARAM( ISSUE_LATENCY );
-PARAM( CACHE_RESPONSE_LATENCY );
-PARAM( L2_RESPONSE_LATENCY );
-PARAM( L2_TAG_LATENCY );
-PARAM( L1_RESPONSE_LATENCY );
-PARAM( MEMORY_RESPONSE_LATENCY_MINUS_2 );
-PARAM( DIRECTORY_LATENCY );
-PARAM( NETWORK_LINK_LATENCY );
-PARAM( COPY_HEAD_LATENCY );
-PARAM( ON_CHIP_LINK_LATENCY );
-PARAM( RECYCLE_LATENCY );
-PARAM( L2_RECYCLE_LATENCY );
-PARAM( TIMER_LATENCY );
-PARAM( TBE_RESPONSE_LATENCY );
-PARAM_BOOL( PERIODIC_TIMER_WAKEUPS );
-
-// constants used by TM protocols
-PARAM_BOOL( PROFILE_EXCEPTIONS );
-PARAM_BOOL( PROFILE_XACT );
-PARAM_BOOL( PROFILE_NONXACT );
-PARAM_BOOL( XACT_DEBUG );
-PARAM ( XACT_DEBUG_LEVEL );
-PARAM_BOOL( XACT_MEMORY );
-PARAM_BOOL( XACT_ENABLE_TOURMALINE );
-PARAM( XACT_NUM_CURRENT );
-PARAM( XACT_LAST_UPDATE );
-PARAM_BOOL( XACT_ISOLATION_CHECK );
-PARAM_BOOL( PERFECT_FILTER );
-PARAM_STRING( READ_WRITE_FILTER );
-PARAM_BOOL( PERFECT_VIRTUAL_FILTER );
-PARAM_STRING( VIRTUAL_READ_WRITE_FILTER );
-PARAM_BOOL( PERFECT_SUMMARY_FILTER );
-PARAM_STRING( SUMMARY_READ_WRITE_FILTER );
-PARAM_BOOL( XACT_EAGER_CD );
-PARAM_BOOL( XACT_LAZY_VM );
-PARAM_STRING( XACT_CONFLICT_RES );
-PARAM_BOOL( XACT_VISUALIZER );
-PARAM( XACT_COMMIT_TOKEN_LATENCY ) ;
-PARAM_BOOL( XACT_NO_BACKOFF );
-PARAM ( XACT_LOG_BUFFER_SIZE );
-PARAM ( XACT_STORE_PREDICTOR_HISTORY);
-PARAM ( XACT_STORE_PREDICTOR_ENTRIES);
-PARAM ( XACT_STORE_PREDICTOR_THRESHOLD);
-PARAM ( XACT_FIRST_ACCESS_COST );
-PARAM ( XACT_FIRST_PAGE_ACCESS_COST );
-PARAM_BOOL( ENABLE_MAGIC_WAITING );
-PARAM_BOOL( ENABLE_WATCHPOINT );
-PARAM_BOOL( XACT_ENABLE_VIRTUALIZATION_LOGTM_SE );
-
-// ATMTP
-PARAM_BOOL( ATMTP_ENABLED );
-PARAM_BOOL( ATMTP_ABORT_ON_NON_XACT_INST );
-PARAM_BOOL( ATMTP_ALLOW_SAVE_RESTORE_IN_XACT );
-PARAM( ATMTP_XACT_MAX_STORES );
-PARAM( ATMTP_DEBUG_LEVEL );
+//PARAM( DIRECTORY_CACHE_LATENCY, 6, false );
+
+//PARAM( NULL_LATENCY, 1, false );
+//PARAM( ISSUE_LATENCY, 2, false );
+//PARAM( CACHE_RESPONSE_LATENCY, 12, false );
+//PARAM( L2_RESPONSE_LATENCY, 6, false );
+//PARAM( L2_TAG_LATENCY, 6, false );
+//PARAM( L1_RESPONSE_LATENCY, 3, false );
+
+//PARAM( MEMORY_RESPONSE_LATENCY_MINUS_2, 158, false );
+//PARAM( DirectoryLatency, 6, false );
+
+//PARAM( NetworkLinkLatency, 1, false );
+//PARAM( COPY_HEAD_LATENCY, 4, false );
+//PARAM( OnChipLinkLatency, 1, false );
+//PARAM( RecycleLatency, 10, false );
+//PARAM( L2_RECYCLE_LATENCY, 5, false );
+//PARAM( TIMER_LATENCY, 10000, false );
+//PARAM( TBE_RESPONSE_LATENCY, 1, false );
+//PARAM_BOOL( PERIODIC_TIMER_WAKEUPS, true, false );
// constants used by CMP protocols
-PARAM( L1_REQUEST_LATENCY );
-PARAM( L2_REQUEST_LATENCY );
-PARAM_BOOL( SINGLE_ACCESS_L2_BANKS ); // hack to simulate multi-cycle L2 bank accesses
+//PARAM( L1_REQUEST_LATENCY, 2, false );
+//PARAM( L2_REQUEST_LATENCY, 4, false );
+//PARAM_BOOL( SINGLE_ACCESS_L2_BANKS, true, false ); // hack to simulate multi-cycle L2 bank accesses
// Ruby cycles between when a sequencer issues a miss and when it arrives
// at the L1 cache controller
-PARAM( SEQUENCER_TO_CONTROLLER_LATENCY );
+//PARAM( SequencerToControllerLatency, 4, false );
// Number of transitions each controller state machine can complete per cycle
-PARAM( L1CACHE_TRANSITIONS_PER_RUBY_CYCLE );
-PARAM( L2CACHE_TRANSITIONS_PER_RUBY_CYCLE );
-PARAM( DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE );
-
-// Maximum number of requests (including prefetches) outstanding from
-// the sequencer (Note: this also include items buffered in the store
-// buffer)
-PARAM( g_SEQUENCER_OUTSTANDING_REQUESTS );
+//PARAM( L1CacheTransitionsPerCycle, 32, false );
+//PARAM( L2CACHE_TRANSITIONS_PER_RUBY_CYCLE, 32, false );
+//PARAM( DirectoryTransitionsPerCycle, 32, false );
+//PARAM( DMATransitionsPerCycle, 1, false );
// Number of TBEs available for demand misses, prefetches, and replacements
-PARAM( NUMBER_OF_TBES );
-PARAM( NUMBER_OF_L1_TBES );
-PARAM( NUMBER_OF_L2_TBES );
+//PARAM( NumberOfTBEs, 128, false );
+//PARAM( NumberOfL1TBEs, 32, false );
+//PARAM( NumberOfL2TBEs, 32, false );
// NOTE: Finite buffering allows us to simulate a wormhole routed network
// with idealized flow control. All message buffers within the network (i.e.
// the switch's input and output buffers) are set to the size specified below
// by the PROTOCOL_BUFFER_SIZE
-PARAM_BOOL( FINITE_BUFFERING );
-PARAM( FINITE_BUFFER_SIZE ); // Zero is unbounded buffers
+//PARAM_BOOL( FiniteBuffering, false, false );
+//PARAM( FiniteBufferSize, 3, false ); // Zero is unbounded buffers
// Number of requests buffered between the sequencer and the L1 controller
// This can be more accurately simulated in Opal, therefore it's set to an
// infinite number
// Only affects the simulation when FINITE_BUFFERING is enabled
-PARAM( PROCESSOR_BUFFER_SIZE );
+//PARAM( ProcessorBufferSize, 10, false );
// The PROTOCOL_BUFFER_SIZE limits the size of all other buffers connecting to
// Controllers. Controls the number of requests issued by the L2 HW Prefetcher
-PARAM( PROTOCOL_BUFFER_SIZE );
-
-// Enable the TSO (Total Store Order) memory model
-PARAM_BOOL( TSO ); // Note: This also disables the "write" STCs
+//PARAM( ProtocolBufferSize, 32, false );
// NETWORK PARAMETERS
// Network Topology: See TopologyType in external.sm for valid values
-PARAM_STRING( g_NETWORK_TOPOLOGY );
+//PARAM_STRING( NetworkTopology, "PT_TO_PT", false );
// Cache Design specifies file prefix for topology
-PARAM_STRING( g_CACHE_DESIGN );
-
-PARAM( g_endpoint_bandwidth );
-PARAM_BOOL( g_adaptive_routing );
-PARAM( NUMBER_OF_VIRTUAL_NETWORKS );
-PARAM( FAN_OUT_DEGREE );
-PARAM_BOOL( g_PRINT_TOPOLOGY );
+//PARAM_STRING( CacheDesign, "NUCA", false );
-// transactional memory
-PARAM( XACT_LENGTH );
-PARAM( XACT_SIZE );
-PARAM( ABORT_RETRY_TIME );
+//PARAM( EndpointBandwidth, 10000, false );
+//PARAM_BOOL( AdaptiveRouting, true, false );
+//PARAM( NumberOfVirtualNetworks, 6, false );
+//PARAM( FanOutDegree, 4, false );
+//PARAM_BOOL( PrintTopology, true, false );
// Princeton Network (Garnet)
-PARAM_BOOL( g_GARNET_NETWORK );
-PARAM_BOOL( g_DETAIL_NETWORK );
-PARAM_BOOL( g_NETWORK_TESTING );
-PARAM( g_FLIT_SIZE );
-PARAM( g_NUM_PIPE_STAGES );
-PARAM( g_VCS_PER_CLASS );
-PARAM( g_BUFFER_SIZE );
+//PARAM_BOOL( UsingGarnetNetwork, true, false );
+//PARAM_BOOL( UsingDetailNetwork, false, false );
+//PARAM_BOOL( UsingNetworkTesting, false, false );
+//PARAM( FlitSize, 16, false );
+//PARAM( NumberOfPipeStages, 4, false );
+//PARAM( VCSPerClass, 4, false );
+//PARAM( BufferSize, 4, false );
// MemoryControl:
-PARAM( MEM_BUS_CYCLE_MULTIPLIER );
-PARAM( BANKS_PER_RANK );
-PARAM( RANKS_PER_DIMM );
-PARAM( DIMMS_PER_CHANNEL );
-PARAM( BANK_BIT_0 );
-PARAM( RANK_BIT_0 );
-PARAM( DIMM_BIT_0 );
-PARAM( BANK_QUEUE_SIZE );
-PARAM( BANK_BUSY_TIME );
-PARAM( RANK_RANK_DELAY );
-PARAM( READ_WRITE_DELAY );
-PARAM( BASIC_BUS_BUSY_TIME );
-PARAM( MEM_CTL_LATENCY );
-PARAM( REFRESH_PERIOD );
-PARAM( TFAW );
-PARAM( MEM_RANDOM_ARBITRATE );
-PARAM( MEM_FIXED_DELAY );
+//PARAM( MEM_BUS_CYCLE_MULTIPLIER, 10, false );
+//PARAM( BANKS_PER_RANK, 8, false );
+//PARAM( RANKS_PER_DIMM, 2, false );
+//PARAM( DIMMS_PER_CHANNEL, 2, false );
+//PARAM( BANK_BIT_0, 8, false );
+//PARAM( RANK_BIT_0, 11, false );
+//PARAM( DIMM_BIT_0, 12, false );
+//PARAM( BANK_QUEUE_SIZE, 12, false );
+//PARAM( BankBusyTime, 11, false );
+//PARAM( RANK_RANK_DELAY, 1, false );
+//PARAM( READ_WRITE_DELAY, 2, false );
+//PARAM( BASIC_BUS_BUSY_TIME, 2, false );
+//PARAM( MEM_CTL_LATENCY, 12, false );
+//PARAM( REFRESH_PERIOD, 1560, false );
+//PARAM( TFAW, 0, false );
+//PARAM( MEM_RANDOM_ARBITRATE, 0, false );
+//PARAM( MEM_FIXED_DELAY, 0, false );
diff --git a/src/mem/ruby/config/defaults.rb b/src/mem/ruby/config/defaults.rb
new file mode 100644
index 000000000..110bf4241
--- /dev/null
+++ b/src/mem/ruby/config/defaults.rb
@@ -0,0 +1,181 @@
+#!/usr/bin/ruby
+
+
+
+class NetPort < LibRubyObject
+ # number of transitions a SLICC state machine can transition per
+ # cycle
+ default_param :transitions_per_cycle, Integer, 32
+
+ # buffer_size limits the size of all other buffers connecting to
+ # SLICC Controllers. When 0, infinite buffering is used.
+ default_param :buffer_size, Integer, 32
+
+ # added by SS for TBE
+ default_param :number_of_TBEs, Integer, 128
+
+ default_param :recycle_latency, Integer, 10
+end
+
+class Sequencer < IfacePort
+ # Maximum number of requests (including prefetches) outstanding from
+ # the sequencer
+ default_param :max_outstanding_requests, Integer, 16
+
+  # Maximum number of cycles a request can be outstanding before
+ # the Sequencer declares we're in deadlock/livelock
+ default_param :deadlock_threshold, Integer, 500000
+
+end
+
+class Debug < LibRubyObject
+ # For debugging purposes, one can enable a trace of all the protocol
+ # state machine changes. Unfortunately, the code to generate the
+ # trace is protocol specific. To enable the code for some of the
+ # standard protocols,
+ # 1. change protocol_trace = true
+ # 2. enable debug in the Ruby Makefile
+ # 3. set start_time = 1
+ default_param :protocol_trace, Boolean, false
+
+  # a string for filtering debugging output (for all g_debug vars see Debug.hh)
+ default_param :filter_string, String, "q"
+
+ # filters debugging messages based on priority (low, med, high)
+ default_param :verbosity_string, String, "none"
+
+ # filters debugging messages based on a ruby time
+ default_param :start_time, Integer, 1
+
+  # sends debugging messages to an output filename
+ default_param :output_filename, String, ""
+end
+
+class Topology < LibRubyObject
+ # The default link latency between all nodes (internal and external)
+  # in the topology
+ default_param :link_latency, Integer, 1
+
+  # the bandwidth from an external network port to its corresponding
+ # internal switch
+ default_param :external_bw, Integer, 64
+
+  # the bandwidth between internal switches in the network
+ default_param :internal_bw, Integer, 16
+
+ # indicates whether the topology config will be displayed in the
+ # stats file
+ default_param :print_config, Boolean, true
+end
+
+class Network < LibRubyObject
+ default_param :endpoint_bandwidth, Integer, 10000
+ default_param :adaptive_routing, Boolean, true
+ default_param :number_of_virtual_networks, Integer, 6
+ default_param :fan_out_degree, Integer, 4
+
+ # default buffer size. Setting to 0 indicates infinite buffering
+ default_param :buffer_size, Integer, 3
+
+ # local memory latency ?? NetworkLinkLatency
+ default_param :link_latency, Integer, 1
+
+ # on chip latency
+ default_param :on_chip_latency, Integer, 1
+end
+
+class GarnetNetwork < Network
+ default_param :flit_size, Integer, 16
+ default_param :number_of_pipe_stages, Integer, 4
+ default_param :vcs_per_class, Integer, 4
+ default_param :buffer_size, Integer, 4
+ default_param :using_network_testing, Boolean, false
+end
+
+
+
+#added by SS
+class Tracer < LibRubyObject
+ default_param :warmup_length, Integer, 1000000
+end
+
+#added by SS
+class Profiler < LibRubyObject
+ default_param :hot_lines, Boolean, false
+ default_param :all_instructions, Boolean, false
+end
+
+#added by SS
+class MI_example_CacheController < CacheController
+ default_param :issue_latency, Integer, 2
+ default_param :cache_response_latency, Integer, 12
+end
+
+class MI_example_DirectoryController < DirectoryController
+ default_param :to_mem_ctrl_latency, Integer, 1
+ default_param :directory_latency, Integer, 6
+ default_param :memory_latency, Integer, 158
+end
+
+
+#added by SS
+class MemoryControl < LibRubyObject
+
+ default_param :mem_bus_cycle_multiplier, Integer, 10
+ default_param :banks_per_rank, Integer, 8
+ default_param :ranks_per_dimm, Integer, 2
+ default_param :dimms_per_channel, Integer, 2
+ default_param :bank_bit_0, Integer, 8
+ default_param :rank_bit_0, Integer, 11
+ default_param :dimm_bit_0, Integer, 12
+ default_param :bank_queue_size, Integer, 12
+ default_param :bank_busy_time, Integer, 11
+ default_param :rank_rank_delay, Integer, 1
+ default_param :read_write_delay, Integer, 2
+ default_param :basic_bus_busy_time, Integer, 2
+ default_param :mem_ctl_latency, Integer, 12
+ default_param :refresh_period, Integer, 1560
+ default_param :tFaw, Integer, 0
+ default_param :mem_random_arbitrate, Integer, 0
+ default_param :mem_fixed_delay, Integer, 0
+
+end
+
+class RubySystem
+
+ # Random seed used by the simulation. If set to "rand", the seed
+  # will be set to the current wall clock time at libruby
+ # initialization. Otherwise, set this to an integer.
+ default_param :random_seed, Object, "rand"
+
+ # When set to true, the simulation will insert random delays on
+ # message enqueue times. Note that even if this is set to false,
+ # you can still have a non-deterministic simulation if random seed
+ # is set to "rand". This is because the Ruby swtiches use random
+ # link priority elevation
+ default_param :randomization, Boolean, false
+
+ # tech_nm is the device size used to calculate latency and area
+ # information about system components
+ default_param :tech_nm, Integer, 45
+
+ # default frequency for the system
+ default_param :freq_mhz, Integer, 3000
+
+ # the default cache block size in the system
+ # libruby does not currently support different block sizes
+ # among different caches
+ # Must be a power of two
+ default_param :block_size_bytes, Integer, 64
+
+ # The default debug object. There shouldn't be a reason to ever
+ # change this line. To adjust debug paramters statically, adjust
+ # them in the Debug class above. To adjust these fields
+ # dynamically, access this RubySystem object,
+ # e.g. RubySystem.debug.protocol_trace = true
+ default_param :debug, Debug, Debug.new("dbg0")
+ default_param :tracer, Tracer, Tracer.new("tracer0")
+
+ default_param :profiler, Profiler, Profiler.new("profiler0")
+end
+
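The values above are only class-level seeds; a configuration script can override them after cfg.rb/defaults.rb are loaded, either system-wide through RubySystem or per class. A short hedged sketch follows (the specific values are illustrative, not recommendations):

    RubySystem.random_seed = 12345          # or leave at "rand" for a wall-clock seed
    RubySystem.freq_mhz    = 2000
    RubySystem.tech_nm     = 32

    # debug fields can also be adjusted dynamically, as the comment above notes
    RubySystem.debug.protocol_trace   = true
    RubySystem.debug.verbosity_string = "low"

    # re-seed a class-wide default; instances that never set the parameter
    # themselves pick it up when applyDefaults runs at config-generation time
    MemoryControl.default_param :mem_ctl_latency, Integer, 20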
diff --git a/src/mem/ruby/config/libruby_cfg_test.cc b/src/mem/ruby/config/libruby_cfg_test.cc
new file mode 100644
index 000000000..5d5b69d5f
--- /dev/null
+++ b/src/mem/ruby/config/libruby_cfg_test.cc
@@ -0,0 +1,14 @@
+
+#include <iostream>
+#include <assert.h>
+
+#include "../libruby.hh"
+
+int main(int argc, char* argv[])
+{
+ assert(argc == 2);
+ const char* cfg_file = argv[1];
+
+ libruby_init(cfg_file);
+ libruby_print_config(std::cout);
+}
diff --git a/src/mem/ruby/config/print_cfg.rb b/src/mem/ruby/config/print_cfg.rb
new file mode 100644
index 000000000..0a6d180d4
--- /dev/null
+++ b/src/mem/ruby/config/print_cfg.rb
@@ -0,0 +1,14 @@
+
+ruby_cfg_file = nil
+$stderr.puts $*.inspect
+for i in 0..$*.size-1 do
+ if $*[i] == "-r" # ruby config file
+ i += 1
+ ruby_cfg_file = $*[i]
+ break
+ end
+end
+
+require ruby_cfg_file
+
+RubySystem.generateConfig
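print_cfg.rb is the driver that glues the pieces together: it loads the configuration script named by -r and asks RubySystem to emit the flat text that libruby_init() parses (libruby_cfg_test.cc above exercises exactly that path). Roughly, for a hypothetical my_cfg.rb, the flow is:

    require "my_cfg.rb"          # builds the objects and calls RubySystem.init
    RubySystem.generateConfig    # prints "System sys0 <params>" followed by one
                                 # "<cppClassName> <obj_name> <argv>" line per object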
diff --git a/src/mem/ruby/config/rubyconfig.defaults b/src/mem/ruby/config/rubyconfig.defaults
index 873192c05..936a2f091 100644
--- a/src/mem/ruby/config/rubyconfig.defaults
+++ b/src/mem/ruby/config/rubyconfig.defaults
@@ -43,10 +43,6 @@ g_DEADLOCK_THRESHOLD: 500000
// (does not apply when running Opal)
SIMICS_RUBY_MULTIPLIER: 4
-// corresponding parameter when using Opal+Ruby+Simics
-OPAL_RUBY_MULTIPLIER: 1
-
-
// Ruby cycles between when a sequencer issues a request and it arrives at
// the L1 cache controller
//
@@ -107,7 +103,7 @@ L2_CACHE_ASSOC: 4
L2_CACHE_NUM_SETS_BITS: 16
// 32 bits = 4 GB address space
-g_MEMORY_SIZE_BYTES: 4294967296
+g_MEMORY_SIZE_BYTES: 1073741824 //4294967296
g_DATA_BLOCK_BYTES: 64
g_PAGE_SIZE_BYTES: 4096
g_REPLACEMENT_POLICY: PSEDUO_LRU // currently, only other option is LRU
@@ -176,8 +172,6 @@ L1_REQUEST_LATENCY: 2
L2_REQUEST_LATENCY: 4
-
-
// Number of transitions each controller state machine can complete per cycle
// i.e. the number of ports to each controller
// L1cache is the sum of the L1I and L1D cache ports
@@ -186,6 +180,7 @@ L1CACHE_TRANSITIONS_PER_RUBY_CYCLE: 32
// much greater constraint on the concurrency of a L2 cache bank
L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 32
DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 32
+DMA_TRANSITIONS_PER_RUBY_CYCLE: 1
// Number of TBEs available for demand misses, ALL prefetches, and replacements
@@ -195,10 +190,6 @@ NUMBER_OF_TBES: 128
NUMBER_OF_L1_TBES: 32
NUMBER_OF_L2_TBES: 32
-// TSO is deprecated
-TSO: false
-
-
// ** INTERCONNECT PARAMETERS **
//
g_PRINT_TOPOLOGY: true
@@ -207,7 +198,7 @@ g_CACHE_DESIGN: NUCA // specifies file prefix for FILE_SPECIFIED topology
FAN_OUT_DEGREE: 4 // for HIERARCHICAL SWITCH topology
g_adaptive_routing: true
-NUMBER_OF_VIRTUAL_NETWORKS: 4
+NUMBER_OF_VIRTUAL_NETWORKS: 6
// bandwidth unit is 1/1000 byte per cycle. the following parameter is multiplied by
// topology specific link weights
@@ -240,57 +231,6 @@ PROTOCOL_BUFFER_SIZE: 32
SINGLE_ACCESS_L2_BANKS: true
-// constants used by TM protocols
-PROFILE_EXCEPTIONS: false
-PROFILE_XACT: true
-PROFILE_NONXACT: false
-XACT_DEBUG: true
-XACT_DEBUG_LEVEL: 1
-//XACT_MEMORY: true // set to true for TM protocols. set it HERE for lazy systems to register the proper SIMICS interfaces
-XACT_MEMORY: false
-XACT_ENABLE_TOURMALINE: false // perfect memory system
-XACT_NUM_CURRENT: 0 // must be 0
-XACT_LAST_UPDATE: 0 // must be 0
-XACT_ISOLATION_CHECK: false // Checks whether each memory access preserves transaction isolation
-PERFECT_FILTER: true // If true, use perfect physical read/write filters
-READ_WRITE_FILTER: Perfect_
-PERFECT_VIRTUAL_FILTER: true // If true, use perfect virtual read/write filters
-VIRTUAL_READ_WRITE_FILTER: Perfect_
-PERFECT_SUMMARY_FILTER: true // If true, use perfect summary read/write filters
-SUMMARY_READ_WRITE_FILTER: Perfect_
-XACT_EAGER_CD: true
-XACT_LAZY_VM: false
-XACT_CONFLICT_RES: BASE
-XACT_COMMIT_TOKEN_LATENCY: 0
-XACT_VISUALIZER: false
-XACT_NO_BACKOFF: false
-XACT_LOG_BUFFER_SIZE: 0
-XACT_STORE_PREDICTOR_ENTRIES: 256
-XACT_STORE_PREDICTOR_HISTORY: 256
-XACT_STORE_PREDICTOR_THRESHOLD: 4
-XACT_FIRST_ACCESS_COST: 0
-XACT_FIRST_PAGE_ACCESS_COST: 0
-ENABLE_MAGIC_WAITING: false
-ENABLE_WATCHPOINT: false
-XACT_ENABLE_VIRTUALIZATION_LOGTM_SE: false
-// g_NETWORK_TOPOLOGY: FILE_SPECIFIED
-// NUMBER_OF_VIRTUAL_NETWORKS: 5
-// L2_REQUEST_LATENCY: 15
-// SEQUENCER_TO_CONTROLLER_LATENCY: 3
-// L2_RESPONSE_LATENCY: 20
-// L2_TAG_LATENCY: 6
-// MEMORY_RESPONSE_LATENCY_MINUS_2: 448
-// RECYCLE_LATENCY: 1
-// g_MEMORY_SIZE_BYTES: 268435456
-// REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH: true
-
-// ATMTP
-ATMTP_ENABLED: false
-ATMTP_ABORT_ON_NON_XACT_INST: false
-ATMTP_ALLOW_SAVE_RESTORE_IN_XACT: false
-ATMTP_XACT_MAX_STORES: 32
-ATMTP_DEBUG_LEVEL: 0
-
// MOESI_CMP_token parameters (some might be deprecated)
g_FILTERING_ENABLED: false
g_DISTRIBUTED_PERSISTENT_ENABLED: true
@@ -321,7 +261,7 @@ g_hold_time: 5
g_wait_time: 5
// Princeton Network (Garnet)
-g_GARNET_NETWORK: false
+g_GARNET_NETWORK: true
g_DETAIL_NETWORK: false
g_NETWORK_TESTING: false
g_FLIT_SIZE: 16
diff --git a/src/mem/ruby/config/tester.defaults b/src/mem/ruby/config/tester.defaults
index 6ba655770..b30d1ba99 100644
--- a/src/mem/ruby/config/tester.defaults
+++ b/src/mem/ruby/config/tester.defaults
@@ -6,10 +6,11 @@
// Please: - Add new variables only to rubyconfig.defaults file.
// - Change them here only when necessary.
+g_SIMICS: false
DATA_BLOCK: true
RANDOMIZATION: true
-g_SYNTHETIC_DRIVER: true
-g_DETERMINISTIC_DRIVER: false
+g_SYNTHETIC_DRIVER: false
+g_DETERMINISTIC_DRIVER: true
g_DEADLOCK_THRESHOLD: 500000
g_SpecifiedGenerator: DetermGETXGenerator
@@ -28,23 +29,13 @@ L2_CACHE_NUM_SETS_BITS: 5
g_MEMORY_SIZE_BYTES: 1048576
-// XACT MEMORY
-XACT_LENGTH: 2000
-XACT_SIZE: 1000
-ABORT_RETRY_TIME: 400
-XACT_ISOLATION_CHECK: true
-L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 1000
-DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 1000
-PERFECT_FILTER: true // If true, use perfect read/write filters
-READ_WRITE_FILTER: Perfect_
-
//g_NETWORK_TOPOLOGY: FILE_SPECIFIED
RECYCLE_LATENCY: 1
//NUMBER_OF_VIRTUAL_NETWORKS: 5
//g_NUM_MEMORIES: 16
L2CACHE_TRANSITIONS_PER_RUBY_CYCLE: 1000
DIRECTORY_TRANSITIONS_PER_RUBY_CYCLE: 1000
-//g_PROCS_PER_CHIP: 16
+//g_PROCS_PER_CHIP: 2
//g_NUM_L2_BANKS: 16
//g_endpoint_bandwidth: 10000
//g_NUM_PROCESSORS: 16
diff --git a/src/mem/ruby/eventqueue/RubyEventQueue.cc b/src/mem/ruby/eventqueue/RubyEventQueue.cc
index 4a979942f..1a4159f1d 100644
--- a/src/mem/ruby/eventqueue/RubyEventQueue.cc
+++ b/src/mem/ruby/eventqueue/RubyEventQueue.cc
@@ -49,7 +49,7 @@ RubyEventQueue::RubyEventQueue()
RubyEventQueue::~RubyEventQueue()
{
- // delete m_prio_heap_ptr;
+ delete m_prio_heap_ptr;
}
void RubyEventQueue::init()
@@ -91,7 +91,7 @@ void RubyEventQueue::triggerEvents(Time t)
assert(thisNode.m_consumer_ptr != NULL);
DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,*(thisNode.m_consumer_ptr));
DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,thisNode.m_time);
- thisNode.m_consumer_ptr->triggerWakeup();
+ thisNode.m_consumer_ptr->triggerWakeup(this);
}
m_globalTime = t;
}
@@ -107,7 +107,7 @@ void RubyEventQueue::triggerAllEvents()
assert(thisNode.m_consumer_ptr != NULL);
DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,*(thisNode.m_consumer_ptr));
DEBUG_EXPR(EVENTQUEUE_COMP,MedPrio,thisNode.m_time);
- thisNode.m_consumer_ptr->triggerWakeup();
+ thisNode.m_consumer_ptr->triggerWakeup(this);
}
}
diff --git a/src/mem/ruby/filters/AbstractBloomFilter.hh b/src/mem/ruby/filters/AbstractBloomFilter.hh
new file mode 100644
index 000000000..7e37a6e06
--- /dev/null
+++ b/src/mem/ruby/filters/AbstractBloomFilter.hh
@@ -0,0 +1,71 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AbstractBloomFilter.hh
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef ABSTRACT_BLOOM_FILTER_H
+#define ABSTRACT_BLOOM_FILTER_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+
+class AbstractBloomFilter {
+public:
+
+ virtual ~AbstractBloomFilter() {};
+ virtual void clear() = 0;
+ virtual void increment(const Address& addr) = 0;
+ virtual void decrement(const Address& addr) = 0;
+ virtual void merge(AbstractBloomFilter * other_filter) = 0;
+ virtual void set(const Address& addr) = 0;
+ virtual void unset(const Address& addr) = 0;
+
+ virtual bool isSet(const Address& addr) = 0;
+ virtual int getCount(const Address& addr) = 0;
+ virtual int getTotalCount() = 0;
+
+ virtual void print(ostream& out) const = 0;
+
+ virtual int getIndex(const Address& addr) = 0;
+ virtual int readBit(const int index) = 0;
+ virtual void writeBit(const int index, const int value) = 0;
+
+private:
+
+};
+
+
+#endif
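A minimal usage sketch (not part of the patch) of how a concrete filter is driven through this interface; BlockBloomFilter is one of the subclasses added below, and the "4096" size string is an arbitrary example value.

    #include "mem/ruby/filters/AbstractBloomFilter.hh"
    #include "mem/ruby/filters/BlockBloomFilter.hh"

    void bloom_usage_sketch(const Address& addr)
    {
      AbstractBloomFilter* filter = new BlockBloomFilter("4096"); // 4096 buckets
      filter->clear();
      filter->set(addr);                        // record this block address
      bool maybe = filter->isSet(addr);         // true; may also be true for other addresses (false positives)
      int occupied = filter->getTotalCount();   // buckets currently set
      delete filter;
    }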
diff --git a/src/mem/ruby/filters/BlockBloomFilter.cc b/src/mem/ruby/filters/BlockBloomFilter.cc
new file mode 100644
index 000000000..9bf5b41c5
--- /dev/null
+++ b/src/mem/ruby/filters/BlockBloomFilter.cc
@@ -0,0 +1,147 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BlockBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/BlockBloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+BlockBloomFilter::BlockBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_filter.setSize(m_filter_size);
+
+ clear();
+}
+
+BlockBloomFilter::~BlockBloomFilter(){
+}
+
+void BlockBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void BlockBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void BlockBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void BlockBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void BlockBloomFilter::set(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 1;
+}
+
+void BlockBloomFilter::unset(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 0;
+}
+
+bool BlockBloomFilter::isSet(const Address& addr)
+{
+ int i = get_index(addr);
+ return (m_filter[i]);
+}
+
+
+int BlockBloomFilter::getCount(const Address& addr)
+{
+ return m_filter[get_index(addr)];
+}
+
+int BlockBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ if (m_filter[i]) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int BlockBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+void BlockBloomFilter::print(ostream& out) const
+{
+}
+
+int BlockBloomFilter::readBit(const int index) {
+ return m_filter[index];
+}
+
+void BlockBloomFilter::writeBit(const int index, const int value) {
+ m_filter[index] = value;
+}
+
+int BlockBloomFilter::get_index(const Address& addr)
+{
+ // Pull out some bit field ==> B1
+ // Pull out additional bits, not the same as B1 ==> B2
+ // XOR B1 and B2 to get hash index
+ physical_address_t block_bits = addr.bitSelect( RubySystem::getBlockSizeBits(), 2*RubySystem::getBlockSizeBits() - 1);
+ int offset = 5;
+ physical_address_t other_bits = addr.bitSelect( 2*RubySystem::getBlockSizeBits() + offset, 2*RubySystem::getBlockSizeBits() + offset + m_filter_size_bits - 1);
+ int index = block_bits ^ other_bits;
+ assert(index < m_filter_size);
+ return index;
+}
+
+
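A small worked sketch of the index hash above (not part of the patch), assuming 64-byte blocks (getBlockSizeBits() == 6) and a 64-entry filter (m_filter_size_bits == 6); those parameters and the example address are illustrative assumptions.

    uint64_t paddr = 0x20440;              // example physical address
    int b1 = (paddr >> 6)  & 0x3F;         // bits [6..11]  -> B1 = 0x11
    int b2 = (paddr >> 17) & 0x3F;         // bits [17..22] -> B2 = 0x01 (offset 5 past the block-number field)
    int index = b1 ^ b2;                   // 0x11 ^ 0x01 == 0x10, the bucket used by set()/isSet()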
diff --git a/src/mem/ruby/filters/BlockBloomFilter.hh b/src/mem/ruby/filters/BlockBloomFilter.hh
new file mode 100644
index 000000000..205b4172d
--- /dev/null
+++ b/src/mem/ruby/filters/BlockBloomFilter.hh
@@ -0,0 +1,82 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BlockBloomFilter.hh
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef BLOCK_BLOOM_FILTER_H
+#define BLOCK_BLOOM_FILTER_H
+
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
+
+class BlockBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~BlockBloomFilter();
+ BlockBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_index(const Address& addr);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/filters/BulkBloomFilter.cc b/src/mem/ruby/filters/BulkBloomFilter.cc
new file mode 100644
index 000000000..264b4201c
--- /dev/null
+++ b/src/mem/ruby/filters/BulkBloomFilter.cc
@@ -0,0 +1,232 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BulkBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/BulkBloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+BulkBloomFilter::BulkBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+ // split the filter bits in half, c0 and c1
+ m_sector_bits = m_filter_size_bits - 1;
+
+ m_temp_filter.setSize(m_filter_size);
+ m_filter.setSize(m_filter_size);
+ clear();
+
+ // clear temp filter
+ for(int i=0; i < m_filter_size; ++i){
+ m_temp_filter[i] = 0;
+ }
+}
+
+BulkBloomFilter::~BulkBloomFilter(){
+
+}
+
+void BulkBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void BulkBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void BulkBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void BulkBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void BulkBloomFilter::set(const Address& addr)
+{
+ // c0 contains the cache index bits
+ int set_bits = m_sector_bits;
+ int block_bits = RubySystem::getBlockSizeBits();
+ int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
+ // c1 contains the lower m_sector_bits permuted bits
+ //Address permuted_bits = permute(addr);
+ //int c1 = permuted_bits.bitSelect(0, set_bits-1);
+ int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
+ //ASSERT(c0 < (m_filter_size/2));
+ //ASSERT(c0 + (m_filter_size/2) < m_filter_size);
+ //ASSERT(c1 < (m_filter_size/2));
+ // set v0 bit
+ m_filter[c0 + (m_filter_size/2)] = 1;
+ // set v1 bit
+ m_filter[c1] = 1;
+}
+
+void BulkBloomFilter::unset(const Address& addr)
+{
+ // not used
+}
+
+bool BulkBloomFilter::isSet(const Address& addr)
+{
+ // c0 contains the cache index bits
+ int set_bits = m_sector_bits;
+ int block_bits = RubySystem::getBlockSizeBits();
+ int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
+ // c1 contains the lower m_sector_bits permuted bits
+ //Address permuted_bits = permute(addr);
+ //int c1 = permuted_bits.bitSelect(0, set_bits-1);
+ int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
+ //ASSERT(c0 < (m_filter_size/2));
+ //ASSERT(c0 + (m_filter_size/2) < m_filter_size);
+ //ASSERT(c1 < (m_filter_size/2));
+ // set v0 bit
+ m_temp_filter[c0 + (m_filter_size/2)] = 1;
+ // set v1 bit
+ m_temp_filter[c1] = 1;
+
+ // perform filter intersection. If any c part is 0, no possibility of address being in signature.
+ // get first c intersection part
+ bool zero = false;
+ for(int i=0; i < m_filter_size/2; ++i){
+ // get intersection of signatures
+ m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
+ zero = zero || m_temp_filter[i];
+ }
+ zero = !zero;
+ if(zero){
+ // one section is zero, no possibility of the address being in the signature
+ // reset bits we just set
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return false;
+ }
+
+ // check second section
+ zero = false;
+ for(int i=m_filter_size/2; i < m_filter_size; ++i){
+ // get intersection of signatures
+ m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
+ zero = zero || m_temp_filter[i];
+ }
+ zero = !zero;
+ if(zero){
+ // one section is zero, no possibility of the address being in the signature
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return false;
+ }
+ // one section has at least one bit set
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return true;
+}
+
+
+int BulkBloomFilter::getCount(const Address& addr)
+{
+ // not used
+ return 0;
+}
+
+int BulkBloomFilter::getTotalCount()
+{
+ int count = 0;
+ for (int i = 0; i < m_filter_size; i++) {
+ if (m_filter[i]) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int BulkBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+int BulkBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void BulkBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+void BulkBloomFilter::print(ostream& out) const
+{
+}
+
+int BulkBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_filter_size_bits - 1);
+}
+
+Address BulkBloomFilter::permute(const Address & addr){
+ // permutes the original address bits according to Table 5
+ int block_offset = RubySystem::getBlockSizeBits();
+ physical_address_t part1 = addr.bitSelect( block_offset, block_offset + 6 );
+ physical_address_t part2 = addr.bitSelect( block_offset + 9, block_offset + 9 );
+ physical_address_t part3 = addr.bitSelect( block_offset + 11, block_offset + 11 );
+ physical_address_t part4 = addr.bitSelect( block_offset + 17, block_offset + 17 );
+ physical_address_t part5 = addr.bitSelect( block_offset + 7, block_offset + 8 );
+ physical_address_t part6 = addr.bitSelect( block_offset + 10, block_offset + 10 );
+ physical_address_t part7 = addr.bitSelect( block_offset + 12, block_offset + 12 );
+ physical_address_t part8 = addr.bitSelect( block_offset + 13, block_offset + 13 );
+ physical_address_t part9 = addr.bitSelect( block_offset + 15, block_offset + 16 );
+ physical_address_t part10 = addr.bitSelect( block_offset + 18, block_offset + 20 );
+ physical_address_t part11 = addr.bitSelect( block_offset + 14, block_offset + 14 );
+
+ physical_address_t result = (part1 << 14 ) | (part2 << 13 ) | (part3 << 12 ) | (part4 << 11 ) | (part5 << 9) | (part6 << 8)
+ | (part7 << 7) | (part8 << 6) | (part9 << 4) | (part10 << 1) | (part11);
+ // assume 32 bit addresses (both virtual and physical)
+ // select the remaining high-order 11 bits
+ physical_address_t remaining_bits = (addr.bitSelect( block_offset + 21, 31 )) << 21;
+ result = result | remaining_bits;
+
+ return Address(result);
+}
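For reference (not part of the patch): because m_temp_filter holds only the two bits just inserted by the query, the half-by-half intersection test above reduces to checking that both of the query's bits are already present in m_filter. A hedged equivalent sketch, reusing the c0/c1 indices computed in isSet():

    bool bulkSignatureHit(const Vector<int>& filter, int c0, int c1, int filter_size)
    {
      // The address's v0 bit lives in the upper half, its v1 bit in the lower half;
      // membership is only possible when both were set by an earlier set() call.
      return filter[c0 + (filter_size/2)] && filter[c1];
    }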
diff --git a/src/mem/ruby/tester/Tester.hh b/src/mem/ruby/filters/BulkBloomFilter.hh
index 7b721e038..2dbdb6612 100644
--- a/src/mem/ruby/tester/Tester.hh
+++ b/src/mem/ruby/filters/BulkBloomFilter.hh
@@ -28,66 +28,60 @@
*/
/*
- * $Id$
+ * BulkBloomFilter.hh
*
* Description:
*
+ *
*/
-#ifndef TESTER_H
-#define TESTER_H
+#ifndef BULK_BLOOM_FILTER_H
+#define BULK_BLOOM_FILTER_H
+#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Driver.hh"
-#include "mem/ruby/tester/CheckTable.hh"
-#include "mem/protocol/CacheRequestType.hh"
-
-class RubySystem;
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
-class Tester : public Driver, public Consumer {
+class BulkBloomFilter : public AbstractBloomFilter {
public:
- // Constructors
- Tester(RubySystem* sys_ptr);
- // Destructor
- ~Tester();
+ ~BulkBloomFilter();
+ BulkBloomFilter(string config);
- // Public Methods
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
- void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
- void wakeup();
- void printStats(ostream& out) const {}
- void clearStats() {}
- void printConfig(ostream& out) const {}
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
void print(ostream& out) const;
-private:
- // Private Methods
- void checkForDeadlock();
+private:
- // Private copy constructor and assignment operator
- Tester(const Tester& obj);
- Tester& operator=(const Tester& obj);
+ int get_index(const Address& addr);
+ Address permute(const Address & addr);
- // Data Members (m_ prefix)
+ Vector<int> m_filter;
+ Vector<int> m_temp_filter;
- CheckTable m_checkTable;
- Vector<Time> m_last_progress_vector;
-};
+ int m_filter_size;
+ int m_filter_size_bits;
-// Output operator declaration
-ostream& operator<<(ostream& out, const Tester& obj);
+ int m_sector_bits;
-// ******************* Definitions *******************
+ int m_count_bits;
+ int m_count;
+};
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const Tester& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-#endif //TESTER_H
+#endif
diff --git a/src/mem/ruby/filters/GenericBloomFilter.cc b/src/mem/ruby/filters/GenericBloomFilter.cc
new file mode 100644
index 000000000..f4198ef40
--- /dev/null
+++ b/src/mem/ruby/filters/GenericBloomFilter.cc
@@ -0,0 +1,150 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GenericBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+
+#include "mem/ruby/filters/GenericBloomFilter.hh"
+#include "mem/ruby/filters/LSB_CountingBloomFilter.hh"
+#include "mem/ruby/filters/NonCountingBloomFilter.hh"
+#include "mem/ruby/filters/BulkBloomFilter.hh"
+#include "mem/ruby/filters/BlockBloomFilter.hh"
+#include "mem/ruby/filters/MultiGrainBloomFilter.hh"
+#include "mem/ruby/filters/MultiBitSelBloomFilter.hh"
+#include "mem/ruby/filters/H3BloomFilter.hh"
+
+GenericBloomFilter::GenericBloomFilter(string config)
+{
+ string tail(config);
+ string head = string_split(tail,'_');
+
+ if (head == "LSB_Counting" ) {
+ m_filter = new LSB_CountingBloomFilter(tail);
+ }
+ else if(head == "NonCounting" ) {
+ m_filter = new NonCountingBloomFilter(tail);
+ }
+ else if(head == "Bulk" ) {
+ m_filter = new BulkBloomFilter(tail);
+ }
+ else if(head == "Block") {
+ m_filter = new BlockBloomFilter(tail);
+ }
+ else if(head == "Multigrain"){
+ m_filter = new MultiGrainBloomFilter(tail);
+ }
+ else if(head == "MultiBitSel"){
+ m_filter = new MultiBitSelBloomFilter(tail);
+ }
+ else if(head == "H3"){
+ m_filter = new H3BloomFilter(tail);
+ }
+ else {
+ assert(0);
+ }
+}
+
+GenericBloomFilter::~GenericBloomFilter()
+{
+ delete m_filter;
+}
+
+void GenericBloomFilter::clear()
+{
+ m_filter->clear();
+}
+
+void GenericBloomFilter::increment(const Address& addr)
+{
+ m_filter->increment(addr);
+}
+
+void GenericBloomFilter::decrement(const Address& addr)
+{
+ m_filter->decrement(addr);
+}
+
+void GenericBloomFilter::merge(GenericBloomFilter * other_filter)
+{
+ m_filter->merge(other_filter->getFilter());
+}
+
+void GenericBloomFilter::set(const Address& addr)
+{
+ m_filter->set(addr);
+}
+
+void GenericBloomFilter::unset(const Address& addr)
+{
+ m_filter->unset(addr);
+}
+
+bool GenericBloomFilter::isSet(const Address& addr)
+{
+ return m_filter->isSet(addr);
+}
+
+int GenericBloomFilter::getCount(const Address& addr)
+{
+ return m_filter->getCount(addr);
+}
+
+int GenericBloomFilter::getTotalCount()
+{
+ return m_filter->getTotalCount();
+}
+
+int GenericBloomFilter::getIndex(const Address& addr)
+{
+ return m_filter->getIndex(addr);
+}
+
+int GenericBloomFilter::readBit(const int index) {
+ return m_filter->readBit(index);
+}
+
+void GenericBloomFilter::writeBit(const int index, const int value) {
+ m_filter->writeBit(index, value);
+}
+
+void GenericBloomFilter::print(ostream& out) const
+{
+ return m_filter->print(out);
+}
+
+
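A minimal construction sketch (not part of the patch): the token before the first '_' selects the concrete filter type, and the remainder is handed to that filter's own constructor. The sizes below are arbitrary example values.

    GenericBloomFilter block_filter("Block_4096");        // BlockBloomFilter with 4096 buckets
    GenericBloomFilter h3_filter("H3_16384_4_Parallel");  // H3BloomFilter: 16384 bits, 4 hashes, parallel banks

    void record_and_query(GenericBloomFilter& f, const Address& addr)
    {
      f.set(addr);
      if (f.isSet(addr)) {
        // possibly present: Bloom filters can report false positives, never false negatives
      }
    }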
diff --git a/src/mem/ruby/system/StoreCache.hh b/src/mem/ruby/filters/GenericBloomFilter.hh
index 81eecde38..4ff65f404 100644
--- a/src/mem/ruby/system/StoreCache.hh
+++ b/src/mem/ruby/filters/GenericBloomFilter.hh
@@ -28,58 +28,67 @@
*/
/*
- * $Id$
+ * GenericBloomFilter.hh
*
* Description:
*
+ *
*/
-#ifndef StoreCache_H
-#define StoreCache_H
+#ifndef GENERIC_BLOOM_FILTER_H
+#define GENERIC_BLOOM_FILTER_H
#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
+class GenericBloomFilter {
+public:
-class DataBlock;
-class SubBlock;
-class StoreCacheEntry;
+ // Constructors
+ GenericBloomFilter(string config);
-template <class KEY_TYPE, class VALUE_TYPE> class Map;
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(GenericBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+ AbstractBloomFilter * getFilter(){
+ return m_filter;
+ }
-class StoreCache {
-public:
- // Constructors
- StoreCache();
+ bool isSet(const Address& addr);
+
+ int getCount(const Address& addr);
+
+ int getTotalCount();
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
- // Destructor
- ~StoreCache();
-
- // Public Methods
- void add(const SubBlock& block);
- void remove(const SubBlock& block);
- bool check(const SubBlock& block) const;
- void update(SubBlock& block) const;
- bool isEmpty() const;
- int size() const;
void print(ostream& out) const;
+ void printConfig(ostream& out) { out << "GenericBloomFilter" << endl; }
-private:
- Map<Address, StoreCacheEntry>* m_internal_cache_ptr;
-};
+ // Destructor
+ ~GenericBloomFilter();
-// Output operator declaration
-ostream& operator<<(ostream& out, const StoreCache& obj);
-// ******************* Definitions *******************
+private:
+
+ AbstractBloomFilter* m_filter;
+};
// Output operator definition
extern inline
-ostream& operator<<(ostream& out, const StoreCache& obj)
+ostream& operator<<(ostream& out, const GenericBloomFilter& obj)
{
obj.print(out);
out << flush;
return out;
}
-#endif //StoreCache_H
+
+#endif
diff --git a/src/mem/ruby/filters/H3BloomFilter.cc b/src/mem/ruby/filters/H3BloomFilter.cc
new file mode 100644
index 000000000..2f5a0fc64
--- /dev/null
+++ b/src/mem/ruby/filters/H3BloomFilter.cc
@@ -0,0 +1,210 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * H3BloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/H3BloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+H3BloomFilter::H3BloomFilter(string str)
+{
+ //TODO: change this ugly init code...
+ primes_list[0] = 9323;
+ primes_list[1] = 11279;
+ primes_list[2] = 10247;
+ primes_list[3] = 30637;
+ primes_list[4] = 25717;
+ primes_list[5] = 43711;
+
+ mults_list[0] = 255;
+ mults_list[1] = 29;
+ mults_list[2] = 51;
+ mults_list[3] = 3;
+ mults_list[4] = 77;
+ mults_list[5] = 43;
+
+ adds_list[0] = 841;
+ adds_list[1] = 627;
+ adds_list[2] = 1555;
+ adds_list[3] = 241;
+ adds_list[4] = 7777;
+ adds_list[5] = 65931;
+
+
+
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head contains filter size, tail contains bit offset from block number
+ m_filter_size = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_num_hashes = atoi(head.c_str());
+
+ if(tail == "Regular") {
+ isParallel = false;
+ } else if (tail == "Parallel") {
+ isParallel = true;
+ } else {
+ cout << "ERROR: Incorrect config string for MultiHash Bloom! :" << str << endl;
+ assert(0);
+ }
+
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_par_filter_size = m_filter_size/m_num_hashes;
+ m_par_filter_size_bits = log_int(m_par_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+H3BloomFilter::~H3BloomFilter(){
+}
+
+void H3BloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void H3BloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void H3BloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void H3BloomFilter::merge(AbstractBloomFilter * other_filter){
+ // assumes both filters are the same size!
+ H3BloomFilter * temp = (H3BloomFilter*) other_filter;
+ for(int i=0; i < m_filter_size; ++i){
+ m_filter[i] |= (*temp)[i];
+ }
+
+}
+
+void H3BloomFilter::set(const Address& addr)
+{
+ for (int i = 0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ m_filter[idx] = 1;
+
+ //Profile hash value distribution
+ //g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); // gem5:Arka decommissioning of log_tm
+ }
+}
+
+void H3BloomFilter::unset(const Address& addr)
+{
+ cout << "ERROR: Unset should never be called in a Bloom filter";
+ assert(0);
+}
+
+bool H3BloomFilter::isSet(const Address& addr)
+{
+ bool res = true;
+
+ for (int i=0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ res = res && m_filter[idx];
+ }
+ return res;
+}
+
+
+int H3BloomFilter::getCount(const Address& addr)
+{
+ return isSet(addr)? 1: 0;
+}
+
+int H3BloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+}
+
+int H3BloomFilter::readBit(const int index) {
+ return 0;
+}
+
+void H3BloomFilter::writeBit(const int index, const int value) {
+
+}
+
+int H3BloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+void H3BloomFilter::print(ostream& out) const
+{
+}
+
+int H3BloomFilter::get_index(const Address& addr, int i)
+{
+ uint64 x = addr.getLineAddress();
+ //uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+ int y = hash_H3(x,i);
+
+ if(isParallel) {
+ return (y % m_par_filter_size) + i*m_par_filter_size;
+ } else {
+ return y % m_filter_size;
+ }
+}
+
+int H3BloomFilter::hash_H3(uint64 value, int index) {
+ uint64 mask = 1;
+ uint64 val = value;
+ int result = 0;
+
+ for(int i = 0; i < 64; i++) {
+ if(val&mask) result ^= H3[i][index];
+ val = val >> 1;
+ }
+ return result;
+ }
+
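A tiny worked sketch of the hash above (not part of the patch): every set bit of the 64-bit line address selects one row of the H3 constant table for the chosen hash function, and the selected entries are XORed together. The line address below is an illustrative assumption.

    uint64 line_addr = 0x5;            // only bits 0 and 2 are set
    int h = H3[0][0] ^ H3[2][0];       // 33268410 ^ 311024285, the rows those bits select for hash 0
    // hash_H3(line_addr, 0) returns h; get_index() then reduces it modulo
    // m_filter_size (or modulo m_par_filter_size within bank i, if parallel).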
diff --git a/src/mem/ruby/filters/H3BloomFilter.hh b/src/mem/ruby/filters/H3BloomFilter.hh
new file mode 100644
index 000000000..70f8a4506
--- /dev/null
+++ b/src/mem/ruby/filters/H3BloomFilter.hh
@@ -0,0 +1,1258 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * H3BloomFilter.hh
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef H3_BLOOM_FILTER_H
+#define H3_BLOOM_FILTER_H
+
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
+
+static int H3[64][16] = {
+{
+33268410,
+395488709,
+311024285,
+456111753,
+181495008,
+119997521,
+220697869,
+433891432,
+755927921,
+515226970,
+719448198,
+349842774,
+269183649,
+463275672,
+429800228,
+521598937
+},
+{
+628677802,
+820947732,
+809435975,
+1024657192,
+887631270,
+412050215,
+391365090,
+324227279,
+318338329,
+1038393087,
+489807930,
+387366128,
+518096428,
+324184340,
+429376066,
+447109279
+},
+{
+599747653,
+404960623,
+103933604,
+946416030,
+656460913,
+925957005,
+1047665689,
+163552053,
+88359290,
+841315415,
+899833584,
+1067336680,
+348549994,
+464045876,
+270252128,
+829897652
+},
+{
+215495230,
+966696438,
+82589012,
+750102795,
+909780866,
+920285789,
+769759214,
+331966823,
+939936006,
+439950703,
+883794828,
+1009277508,
+61634610,
+741444350,
+98689608,
+524144422
+},
+{
+93868534,
+196958667,
+774076619,
+327921978,
+122538783,
+879785030,
+690748527,
+3498564,
+83163077,
+1027963025,
+582088444,
+466152216,
+312424878,
+550064499,
+646612667,
+561099434
+},
+{
+1002047931,
+395477707,
+821317480,
+890482112,
+697094476,
+263813044,
+840275189,
+469664185,
+795625845,
+211504898,
+99204277,
+1004491153,
+725930417,
+1064479221,
+893834767,
+839719181
+},
+{
+278507126,
+985111995,
+706462983,
+1042178726,
+123281719,
+963778122,
+500881056,
+726291104,
+134293026,
+568379664,
+317050609,
+533470307,
+1022365922,
+197645211,
+315125721,
+634827678
+},
+{
+219227366,
+553960647,
+870169525,
+322232839,
+508322497,
+648672696,
+249405795,
+883596102,
+476433133,
+541372919,
+646647793,
+1042679515,
+43242483,
+600187508,
+499866821,
+135713210
+},
+{
+52837162,
+96966684,
+401840460,
+1071661176,
+733560065,
+150035417,
+341319946,
+811582750,
+636173904,
+519054065,
+196321433,
+1028294565,
+882204070,
+522965093,
+48884074,
+117810166
+},
+{
+650860353,
+789534698,
+328813544,
+473250022,
+143128306,
+173196006,
+846958825,
+174632187,
+683273509,
+405459497,
+787235556,
+773873501,
+240110267,
+426797736,
+92043842,
+711789240
+},
+{
+586637493,
+5059646,
+398035664,
+6686087,
+498300175,
+948278148,
+681227731,
+592751744,
+572019677,
+558044722,
+589368271,
+695745538,
+1073416749,
+529192035,
+550984939,
+1070620580
+},
+{
+102904663,
+647598516,
+758863940,
+313426443,
+76504114,
+1050747783,
+708436441,
+563815069,
+224107668,
+875925186,
+167675944,
+926209739,
+279737287,
+1040288182,
+768184312,
+371708956
+},
+{
+683968868,
+1027427757,
+180781926,
+742898864,
+624078545,
+645659833,
+577225838,
+987150210,
+723410002,
+224013421,
+993286634,
+33188488,
+247264323,
+888018697,
+38048664,
+189037096
+},
+{
+475612146,
+426739285,
+873726278,
+529192871,
+607715202,
+388486246,
+987001312,
+474493980,
+259747270,
+417465536,
+217062395,
+392858482,
+563810075,
+137852805,
+1051814153,
+72895217
+},
+{
+71277086,
+785496675,
+500608842,
+89633426,
+274085706,
+248467935,
+838061983,
+48106147,
+773662506,
+49545328,
+9071573,
+100739031,
+602018002,
+904371654,
+534132064,
+332211304
+},
+{
+401893602,
+735125342,
+775548339,
+210224843,
+256081130,
+482894412,
+350801633,
+1035713633,
+429458128,
+327281409,
+739927752,
+359327650,
+886942880,
+847691759,
+752417993,
+359445596
+},
+{
+267472014,
+1050659620,
+1068232362,
+1049684368,
+17130239,
+690524969,
+793224378,
+14455158,
+423092885,
+873853424,
+430535778,
+7867877,
+309731959,
+370260786,
+862353083,
+403906850
+},
+{
+993077283,
+218812656,
+389234651,
+393202875,
+413116501,
+263300295,
+470013158,
+592730725,
+441847172,
+732392823,
+407574059,
+875664777,
+271347307,
+792954404,
+554774761,
+1022424300
+},
+{
+675919719,
+637054073,
+784720745,
+149714381,
+813144874,
+502525801,
+635436670,
+1003196587,
+160786091,
+947509775,
+969788637,
+26854073,
+257964369,
+63898568,
+539767732,
+772364518
+},
+{
+943076868,
+1021732472,
+697575075,
+15843624,
+617573396,
+534113303,
+122953324,
+964873912,
+942995378,
+87830944,
+1012914818,
+455484661,
+592160054,
+599844284,
+810394353,
+836812568
+},
+{
+688992674,
+279465370,
+731582262,
+687883235,
+438178468,
+80493001,
+342701501,
+663561405,
+23360106,
+531315007,
+508931618,
+36294623,
+231216223,
+840438413,
+255665680,
+663205938
+},
+{
+857265418,
+552630887,
+8173237,
+792122963,
+210140052,
+823124938,
+667709953,
+751538219,
+991957789,
+462064153,
+19070176,
+726604748,
+714567823,
+151147895,
+1012619677,
+697114353
+},
+{
+467105652,
+683256174,
+702387467,
+28730434,
+549942998,
+48712701,
+960519696,
+1008345587,
+679267717,
+370932249,
+880419471,
+352141567,
+331640403,
+598772468,
+95160685,
+812053015
+},
+{
+1053491323,
+430526562,
+1014938507,
+109685515,
+765949103,
+177288303,
+1034642653,
+485421658,
+71850281,
+981034542,
+61620389,
+601367920,
+504420930,
+220599168,
+583051998,
+158735752
+},
+{
+103033901,
+522494916,
+658494760,
+959206022,
+931348143,
+834510661,
+21542994,
+189699884,
+679327018,
+171983002,
+96774168,
+456133168,
+543103352,
+923945936,
+970074188,
+643658485
+},
+{
+566379913,
+805798263,
+840662512,
+820206124,
+796507494,
+223712542,
+118811519,
+662246595,
+809326534,
+416471323,
+748027186,
+161169753,
+739149488,
+276330378,
+924837051,
+964873733
+},
+{
+585882743,
+135502711,
+3386031,
+625631285,
+1068193307,
+270342640,
+432739484,
+556606453,
+826419155,
+1038540977,
+158000202,
+69109538,
+207087256,
+298111218,
+678046259,
+184611498
+},
+{
+305310710,
+46237988,
+855726974,
+735975153,
+930663798,
+425764232,
+104362407,
+391371443,
+867622101,
+71645091,
+61824734,
+661902640,
+293738633,
+309416189,
+281710675,
+879317360
+},
+{
+398146324,
+398293087,
+689145387,
+1038451703,
+521637478,
+516134620,
+314658937,
+830334981,
+583400300,
+340083705,
+68029852,
+675389876,
+994635780,
+788959180,
+406967042,
+74403607
+},
+{
+69463153,
+744427484,
+191639960,
+590927798,
+969916795,
+546846769,
+728756758,
+889355646,
+520855076,
+136068426,
+776132410,
+189663815,
+252051082,
+533662856,
+362198652,
+1026161384
+},
+{
+584984279,
+1004834381,
+568439705,
+834508761,
+21812513,
+670870173,
+1052043300,
+341868768,
+473755574,
+124339439,
+36193947,
+437997647,
+137419489,
+58705193,
+337793711,
+340738909
+},
+{
+898051466,
+512792906,
+234874060,
+655358775,
+683745319,
+671676404,
+428888546,
+639928192,
+672697722,
+176477579,
+747020991,
+758211282,
+443045009,
+205395173,
+1016944273,
+5584717
+},
+{
+156038300,
+138620174,
+588466825,
+1061494056,
+1013672100,
+1064257198,
+881417791,
+839470738,
+83519030,
+100875683,
+237486447,
+461483733,
+681527127,
+777996147,
+574635362,
+815974538
+},
+{
+184168473,
+519509808,
+62531892,
+51821173,
+43787358,
+385711644,
+141325169,
+36069511,
+584183031,
+571372909,
+671503175,
+226486781,
+194932686,
+1045460970,
+753718579,
+331442433
+},
+{
+73065106,
+1015327221,
+630916840,
+1058053470,
+306737587,
+296343219,
+907194989,
+920172546,
+224516225,
+818625553,
+551143849,
+634570650,
+432966225,
+756438259,
+939564853,
+767999933
+},
+{
+884775648,
+394862257,
+446787794,
+219833788,
+727195727,
+728122304,
+249888353,
+732947974,
+289908868,
+448282580,
+618161877,
+898939716,
+739554163,
+860631799,
+1058977530,
+86916736
+},
+{
+143850006,
+352708694,
+200194048,
+979764914,
+629404175,
+546279766,
+72106714,
+860980514,
+313190585,
+897143111,
+308425797,
+953791785,
+349924906,
+221457005,
+950588925,
+908254505
+},
+{
+950032043,
+829868728,
+68623614,
+714624605,
+69760597,
+297275854,
+355894016,
+985369737,
+882852618,
+864071289,
+958512902,
+950910111,
+991368991,
+829645051,
+434698210,
+771350575
+},
+{
+552695074,
+319195551,
+80297396,
+496413831,
+944046531,
+621525571,
+617653363,
+416729825,
+441842808,
+9847464,
+99420657,
+1033914550,
+812966458,
+937053011,
+673390195,
+934577365
+},
+{
+1034695843,
+190969665,
+332900185,
+51897434,
+523888639,
+883512843,
+146908572,
+506785674,
+565814307,
+692255649,
+314052926,
+826386588,
+430691325,
+866927620,
+413880214,
+936474339
+},
+{
+129380164,
+741739952,
+1013703462,
+494392795,
+957214600,
+1010879043,
+931790677,
+94551922,
+988065869,
+120637871,
+882506912,
+395075379,
+210570485,
+812422692,
+910383687,
+817722285
+},
+{
+51850866,
+283408630,
+1053047202,
+858940389,
+818507731,
+477082181,
+353546901,
+993324368,
+407093779,
+231608253,
+1067319867,
+73159811,
+429792535,
+971320614,
+565699344,
+718823399
+},
+{
+408185106,
+491493570,
+596050720,
+310776444,
+703628192,
+454438809,
+523988035,
+728512200,
+686012353,
+976339656,
+72816924,
+116926720,
+165866591,
+452043792,
+866943072,
+968545481
+},
+{
+443231195,
+905907843,
+1061421320,
+746360489,
+1043120338,
+1069659155,
+463359031,
+688303227,
+186550710,
+155347339,
+1044842421,
+1005904570,
+69332909,
+706951903,
+422513657,
+882038450
+},
+{
+430990623,
+946501980,
+742556791,
+278398643,
+183759217,
+659404315,
+279754382,
+1069347846,
+843746517,
+222777670,
+990835599,
+548741637,
+129220580,
+1392170,
+1032654091,
+894058935
+},
+{
+452042227,
+751640705,
+259481376,
+765824585,
+145991469,
+1013683228,
+1055491225,
+536379588,
+392593350,
+913368594,
+1029429776,
+226857786,
+31505342,
+1054416381,
+32341741,
+687106649
+},
+{
+404750944,
+811417027,
+869530820,
+773491060,
+810901282,
+979340397,
+1036910290,
+461764404,
+834235095,
+765695033,
+604692390,
+452158120,
+928988098,
+442719218,
+1024059719,
+167723114
+},
+{
+974245177,
+1046377300,
+1003424287,
+787349855,
+336314155,
+875074696,
+1018462718,
+890313003,
+367376809,
+86355556,
+1020618772,
+890710345,
+444741481,
+373230261,
+767064947,
+840920177
+},
+{
+719581124,
+431808156,
+138301690,
+668222575,
+497413494,
+740492013,
+485033226,
+125301442,
+831265111,
+879071459,
+341690480,
+152975256,
+850330086,
+717444507,
+694225877,
+785340566
+},
+{
+1032766252,
+140959364,
+737474726,
+1062767538,
+364464647,
+331414723,
+356152634,
+642832379,
+158733632,
+374691640,
+285504811,
+345349905,
+876599880,
+476392727,
+479589210,
+606376325
+},
+{
+174997730,
+778177086,
+319164313,
+163614456,
+10331364,
+599358958,
+8331663,
+237538058,
+159173957,
+174533880,
+65588684,
+878222844,
+424467599,
+901803515,
+187504218,
+776690353
+},
+{
+803856182,
+965850321,
+694948067,
+218315960,
+358416571,
+683713254,
+178069303,
+428076035,
+686176454,
+579553217,
+357306738,
+315018080,
+886852373,
+568563910,
+896839725,
+257416821
+},
+{
+401650013,
+183289141,
+497957228,
+879734476,
+265024455,
+825794561,
+889237440,
+323359863,
+100258491,
+991414783,
+313986632,
+85847250,
+362520248,
+276103512,
+1041630342,
+525981595
+},
+{
+487732740,
+46201705,
+990837834,
+62744493,
+1067364756,
+58015363,
+690846283,
+680262648,
+997278956,
+469357861,
+432164624,
+996763915,
+211907847,
+167824295,
+144928194,
+454839915
+},
+{
+41404232,
+514493300,
+259546924,
+578217256,
+972345130,
+123299213,
+346040332,
+1014668104,
+520910639,
+579955198,
+36627803,
+179072921,
+547684341,
+598950511,
+269497394,
+854352266
+},
+{
+603906768,
+100863318,
+708837659,
+204175569,
+375560904,
+908375384,
+28314106,
+6303733,
+175283124,
+749851198,
+308667367,
+415293931,
+225365403,
+1032188331,
+977112710,
+819705229
+},
+{
+399767123,
+697985692,
+356790426,
+643687584,
+298624218,
+185095167,
+381653926,
+876816342,
+296720023,
+2205879,
+235816616,
+521850105,
+622753786,
+1021421218,
+726349744,
+256504902
+},
+{
+851245024,
+1022500222,
+511909628,
+313809625,
+99776025,
+39710175,
+798739932,
+741832408,
+140631966,
+898295927,
+607660421,
+870669312,
+1051422478,
+789055529,
+669113756,
+681943450
+},
+{
+853872755,
+491465269,
+503341472,
+98019440,
+258267420,
+335602837,
+320687824,
+1053324395,
+24932389,
+955011453,
+934255131,
+435625663,
+501568768,
+238967025,
+549987406,
+248619780
+},
+{
+411151284,
+576471205,
+757985419,
+544137226,
+968135693,
+877548443,
+194586894,
+74882373,
+248353663,
+21207540,
+273789651,
+853653916,
+861267970,
+533253322,
+3739570,
+661358586
+},
+{
+271430986,
+71390029,
+257643671,
+949329860,
+348156406,
+251939238,
+445808698,
+48269799,
+907589462,
+105677619,
+635451508,
+20805932,
+464874661,
+7542147,
+243619464,
+288304568
+},
+{
+368215982,
+530288964,
+770090421,
+660961164,
+614935537,
+630760399,
+931299233,
+794519275,
+779918979,
+401746493,
+561237006,
+1027202224,
+258968003,
+339508073,
+1050610516,
+1064307013
+},
+{
+1039172162,
+448331205,
+928997884,
+49813151,
+198712120,
+992335354,
+671024050,
+879525220,
+745915336,
+1038822580,
+138669665,
+917958819,
+681422342,
+792868818,
+924762727,
+816386174
+},
+{
+515190336,
+313808618,
+441296783,
+1022120897,
+792325033,
+354387581,
+59273006,
+280075434,
+411357221,
+665274694,
+4054464,
+1059046246,
+394261773,
+848616745,
+15446017,
+517723271
+}};
+
+
+class H3BloomFilter : public AbstractBloomFilter {
+public:
+
+ ~H3BloomFilter();
+ H3BloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ void print(ostream& out) const;
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+
+private:
+
+ int get_index(const Address& addr, int hashNumber);
+
+ int hash_H3(uint64 value, int index);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_num_hashes;
+ int m_filter_size_bits;
+
+ int m_par_filter_size;
+ int m_par_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+
+
+
+ int primes_list[6];// = {9323,11279,10247,30637,25717,43711};
+ int mults_list[6]; //= {255,29,51,3,77,43};
+ int adds_list[6]; //= {841,627,1555,241,7777,65391};
+
+ bool isParallel;
+
+};
+
+
+#endif
diff --git a/src/mem/ruby/filters/LSB_CountingBloomFilter.cc b/src/mem/ruby/filters/LSB_CountingBloomFilter.cc
new file mode 100644
index 000000000..fd2e2653c
--- /dev/null
+++ b/src/mem/ruby/filters/LSB_CountingBloomFilter.cc
@@ -0,0 +1,141 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LSB_CountingBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/LSB_CountingBloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+LSB_CountingBloomFilter::LSB_CountingBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, ':');
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_count = atoi(tail.c_str());
+ m_count_bits = log_int(m_count);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+LSB_CountingBloomFilter::~LSB_CountingBloomFilter(){
+}
+
+void LSB_CountingBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void LSB_CountingBloomFilter::increment(const Address& addr)
+{
+ int i = get_index(addr);
+ // saturate the per-bucket count at m_count
+ if (m_filter[i] < m_count)
+   m_filter[i] += 1;
+}
+
+
+void LSB_CountingBloomFilter::decrement(const Address& addr)
+{
+ int i = get_index(addr);
+ if (m_filter[i] > 0)
+ m_filter[i] -= 1;
+}
+
+void LSB_CountingBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void LSB_CountingBloomFilter::set(const Address& addr)
+{
+ // TODO
+}
+
+void LSB_CountingBloomFilter::unset(const Address& addr)
+{
+ // TODO
+}
+
+bool LSB_CountingBloomFilter::isSet(const Address& addr)
+{
+ // TODO: minimal placeholder implementation; treat a nonzero count in the
+ // addressed bucket as "set" so callers get a defined return value.
+ return (m_filter[get_index(addr)] > 0);
+}
+
+
+int LSB_CountingBloomFilter::getCount(const Address& addr)
+{
+ return m_filter[get_index(addr)];
+}
+
+int LSB_CountingBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+int LSB_CountingBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+void LSB_CountingBloomFilter::print(ostream& out) const
+{
+}
+
+int LSB_CountingBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void LSB_CountingBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+int LSB_CountingBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_filter_size_bits - 1);
+}
+
+
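A minimal usage sketch (not part of the patch); the "4096:15" config string is an arbitrary example meaning 4096 buckets with each count saturating at 15 (the two fields are split on ':').

    LSB_CountingBloomFilter counter("4096:15");

    void track(LSB_CountingBloomFilter& f, const Address& addr)
    {
      f.increment(addr);        // bump this address's bucket, capped at the saturation count
      int c = f.getCount(addr); // current bucket value
      f.decrement(addr);        // decrement, floored at zero
    }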
diff --git a/src/mem/ruby/filters/LSB_CountingBloomFilter.hh b/src/mem/ruby/filters/LSB_CountingBloomFilter.hh
new file mode 100644
index 000000000..7a0f71fad
--- /dev/null
+++ b/src/mem/ruby/filters/LSB_CountingBloomFilter.hh
@@ -0,0 +1,82 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LSB_CountingBloomFilter.hh
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef LSB_COUNTING_BLOOM_FILTER_H
+#define LSB_COUNTING_BLOOM_FILTER_H
+
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
+
+class LSB_CountingBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~LSB_CountingBloomFilter();
+ LSB_CountingBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_index(const Address& addr);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
new file mode 100644
index 000000000..844f80160
--- /dev/null
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
@@ -0,0 +1,191 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiBitSelBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/MultiBitSelBloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+MultiBitSelBloomFilter::MultiBitSelBloomFilter(string str)
+{
+
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head contains the filter size; the remaining fields are the hash count, skip bits, and Regular/Parallel mode
+ m_filter_size = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_num_hashes = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_skip_bits = atoi(head.c_str());
+
+ if(tail == "Regular") {
+ isParallel = false;
+ } else if (tail == "Parallel") {
+ isParallel = true;
+ } else {
+ cout << "ERROR: Incorrect config string for MultiBitSel Bloom! :" << str << endl;
+ assert(0);
+ }
+
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_par_filter_size = m_filter_size/m_num_hashes;
+ m_par_filter_size_bits = log_int(m_par_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+MultiBitSelBloomFilter::~MultiBitSelBloomFilter(){
+}
+
+void MultiBitSelBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void MultiBitSelBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void MultiBitSelBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void MultiBitSelBloomFilter::merge(AbstractBloomFilter * other_filter){
+ // assumes both filters are the same size!
+ MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
+ for(int i=0; i < m_filter_size; ++i){
+ m_filter[i] |= (*temp)[i];
+ }
+
+}
+
+void MultiBitSelBloomFilter::set(const Address& addr)
+{
+ for (int i = 0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ m_filter[idx] = 1;
+
+ //Profile hash value distribution
+ //g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); //gem5:Arka for decommissioning of log_tm
+ }
+}
+
+void MultiBitSelBloomFilter::unset(const Address& addr)
+{
+ cout << "ERROR: Unset should never be called in a Bloom filter";
+ assert(0);
+}
+
+bool MultiBitSelBloomFilter::isSet(const Address& addr)
+{
+ bool res = true;
+
+ for (int i=0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ res = res && m_filter[idx];
+ }
+ return res;
+}
+
+
+int MultiBitSelBloomFilter::getCount(const Address& addr)
+{
+ return isSet(addr)? 1: 0;
+}
+
+int MultiBitSelBloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+}
+
+int MultiBitSelBloomFilter::readBit(const int index) {
+ return 0;
+}
+
+void MultiBitSelBloomFilter::writeBit(const int index, const int value) {
+
+}
+
+int MultiBitSelBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+void MultiBitSelBloomFilter::print(ostream& out) const
+{
+}
+
+int MultiBitSelBloomFilter::get_index(const Address& addr, int i)
+{
+ // m_skip_bits is used to perform BitSelect after skipping some bits. Used to simulate BitSel hashing on larger than cache-line granularities
+ uint64 x = (addr.getLineAddress()) >> m_skip_bits;
+ int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
+ //36-bit addresses, 6-bit cache lines
+
+ if(isParallel) {
+ return (y % m_par_filter_size) + i*m_par_filter_size;
+ } else {
+ return y % m_filter_size;
+ }
+}
+
+
+int MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits) {
+ uint64 mask = 1;
+ int result = 0;
+ int bit, i;
+
+ for(i = 0; i < numBits; i++) {
+ bit = (index + jump*i) % maxBits;
+ if (value & (mask << bit)) result += mask << i;
+ }
+ return result;
+}
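// Illustrative, stand-alone sketch (not applied by this patch): the
// constructor above expects an underscore-separated config string of the form
// "<filter_size>_<num_hashes>_<skip_bits>_<Regular|Parallel>" (for example, a
// hypothetical "4096_4_0_Regular"). Hash i is then built by hash_bitsel(),
// which gathers every (i + num_hashes*k)-th bit, mod 30, of the line address
// into a filter index:

#include <cstdint>

namespace multibitsel_sketch {
  // value:   line address with skip_bits already shifted out
  // index:   which of the num_hashes hash functions is being computed
  // jump:    num_hashes, the stride between sampled address bits
  // maxBits: width of the sampled address window (30 in the code above)
  // numBits: log2(filter size), the width of the resulting index
  inline int hash_bitsel(uint64_t value, int index, int jump,
                         int maxBits, int numBits) {
    int result = 0;
    for (int i = 0; i < numBits; i++) {
      int bit = (index + jump * i) % maxBits;
      if (value & (1ULL << bit))
        result |= (1 << i);
    }
    return result;
  }
  // In "Parallel" mode the filter is split into num_hashes equal banks and
  // hash i indexes only bank i; in "Regular" mode every hash indexes the
  // whole filter.
}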
diff --git a/src/mem/ruby/system/PersistentArbiter.hh b/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
index 1ce05f51f..390b7c37c 100644
--- a/src/mem/ruby/system/PersistentArbiter.hh
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
@@ -28,80 +28,70 @@
*/
/*
- * PersistentArbiter.hh
+ * MultiBitSelBloomFilter.hh
*
* Description:
*
- * Used for hierarchical distributed persistent request scheme
*
*/
-#ifndef PERSISTENTARBITER_H
-#define PERSISTENTARBITER_H
+#ifndef MULTIBITSEL_BLOOM_FILTER_H
+#define MULTIBITSEL_BLOOM_FILTER_H
+#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
-#include "mem/gems_common/Vector.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
-#include "mem/protocol/AccessPermission.hh"
-#include "mem/protocol/AccessType.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/config/RubyConfig.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
-struct ArbiterEntry {
- bool valid;
- Address address;
- AccessType type;
- NodeID localId;
-};
-
-class PersistentArbiter {
+class MultiBitSelBloomFilter : public AbstractBloomFilter {
public:
- // Constructors
- PersistentArbiter(AbstractChip* chip_ptr);
+ ~MultiBitSelBloomFilter();
+ MultiBitSelBloomFilter(string config);
- // Destructor
- ~PersistentArbiter();
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
- // Public Methods
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ void print(ostream& out) const;
- void addLocker(NodeID id, Address addr, AccessType type);
- void removeLocker(NodeID id);
- bool successorRequestPresent(Address addr, NodeID id);
- bool lockersExist();
- void advanceActiveLock();
- Address getActiveLockAddress();
- NodeID getArbiterId();
- bool isBusy();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
- void setIssuedAddress(Address addr);
- bool isIssuedAddress(Address addr);
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+private:
- Address getIssuedAddress() { return m_issued_address; }
+ int get_index(const Address& addr, int hashNumber);
- static void printConfig(ostream& out) {}
- void print(ostream& out) const;
+ int hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits);
- NodeID getActiveLocalId();
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_num_hashes;
+ int m_filter_size_bits;
+ int m_skip_bits;
-private:
+ int m_par_filter_size;
+ int m_par_filter_size_bits;
- Address m_issued_address;
- AbstractChip* m_chip_ptr;
- int m_locker;
- bool m_busy;
- Vector<ArbiterEntry> m_entries;
-};
+ int m_count_bits;
+ int m_count;
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const PersistentArbiter& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
+ bool isParallel;
+};
-#endif //PERFECTCACHEMEMORY_H
+#endif
diff --git a/src/mem/ruby/filters/MultiGrainBloomFilter.cc b/src/mem/ruby/filters/MultiGrainBloomFilter.cc
new file mode 100644
index 000000000..ff32ce923
--- /dev/null
+++ b/src/mem/ruby/filters/MultiGrainBloomFilter.cc
@@ -0,0 +1,172 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiGrainBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/MultiGrainBloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+MultiGrainBloomFilter::MultiGrainBloomFilter(string str)
+{
+ string tail(str);
+
+ // split into the 2 filter sizes
+ string head = string_split(tail, '_');
+
+ // head contains size of 1st bloom filter, tail contains size of 2nd bloom filter
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_page_filter_size = atoi(tail.c_str());
+ m_page_filter_size_bits = log_int(m_page_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ m_page_filter.setSize(m_page_filter_size);
+ clear();
+}
+
+MultiGrainBloomFilter::~MultiGrainBloomFilter(){
+}
+
+void MultiGrainBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+ for(int i=0; i < m_page_filter_size; ++i){
+ m_page_filter[i] = 0;
+ }
+}
+
+void MultiGrainBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void MultiGrainBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void MultiGrainBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void MultiGrainBloomFilter::set(const Address& addr)
+{
+ int i = get_block_index(addr);
+ int j = get_page_index(addr);
+ assert(i < m_filter_size);
+ assert(j < m_page_filter_size);
+ m_filter[i] = 1;
+ m_page_filter[j] = 1;
+
+}
+
+void MultiGrainBloomFilter::unset(const Address& addr)
+{
+ // not used
+}
+
+bool MultiGrainBloomFilter::isSet(const Address& addr)
+{
+ int i = get_block_index(addr);
+ int j = get_page_index(addr);
+ assert(i < m_filter_size);
+ assert(j < m_page_filter_size);
+ // we have to have both indices set
+ return (m_filter[i] && m_page_filter[j]);
+}
+
+int MultiGrainBloomFilter::getCount(const Address& addr)
+{
+ // not used
+ return 0;
+}
+
+int MultiGrainBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+
+ for(int i=0; i < m_page_filter_size; ++i){
+ count += m_page_filter[i];
+ }
+
+ return count;
+}
+
+int MultiGrainBloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+ // TODO
+}
+
+int MultiGrainBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void MultiGrainBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+void MultiGrainBloomFilter::print(ostream& out) const
+{
+}
+
+int MultiGrainBloomFilter::get_block_index(const Address& addr)
+{
+ // grab a chunk of bits just above the byte offset
+ return addr.bitSelect( RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_filter_size_bits - 1);
+}
+
+int MultiGrainBloomFilter::get_page_index(const Address & addr)
+{
+ // grab a chunk of bits just above the first chunk
+ return addr.bitSelect( RubySystem::getBlockSizeBits() + m_filter_size_bits - 1,
+ RubySystem::getBlockSizeBits() + m_filter_size_bits - 1 + m_page_filter_size_bits - 1);
+}
+
+
+
+
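// Illustrative, stand-alone sketch (not applied by this patch): the config
// string parsed above is simply "<block_filter_size>_<page_filter_size>"
// (for example, a hypothetical "4096_256"). Membership requires a hit in both
// granularities, which prunes some block-level false positives:

namespace multigrain_sketch {
  // Mirrors isSet() above once both indices have been computed.
  inline bool is_set(const int* block_filter, int block_idx,
                     const int* page_filter,  int page_idx) {
    return block_filter[block_idx] && page_filter[page_idx];
  }
}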
diff --git a/src/mem/ruby/filters/MultiGrainBloomFilter.hh b/src/mem/ruby/filters/MultiGrainBloomFilter.hh
new file mode 100644
index 000000000..66a32ecd4
--- /dev/null
+++ b/src/mem/ruby/filters/MultiGrainBloomFilter.hh
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiGrainBloomFilter.hh
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef MULTIGRAIN_BLOOM_FILTER_H
+#define MULTIGRAIN_BLOOM_FILTER_H
+
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
+
+class MultiGrainBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~MultiGrainBloomFilter();
+ MultiGrainBloomFilter(string str);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_block_index(const Address& addr);
+ int get_page_index(const Address & addr);
+
+ // The block filter
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+ // The page number filter
+ Vector<int> m_page_filter;
+ int m_page_filter_size;
+ int m_page_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/filters/NonCountingBloomFilter.cc b/src/mem/ruby/filters/NonCountingBloomFilter.cc
new file mode 100644
index 000000000..738945105
--- /dev/null
+++ b/src/mem/ruby/filters/NonCountingBloomFilter.cc
@@ -0,0 +1,144 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NonCountingBloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "mem/ruby/filters/NonCountingBloomFilter.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+
+NonCountingBloomFilter::NonCountingBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head contains filter size, tail contains bit offset from block number
+ m_filter_size = atoi(head.c_str());
+ m_offset = atoi(tail.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+NonCountingBloomFilter::~NonCountingBloomFilter(){
+}
+
+void NonCountingBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void NonCountingBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void NonCountingBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void NonCountingBloomFilter::merge(AbstractBloomFilter * other_filter){
+ // assumes both filters are the same size!
+ NonCountingBloomFilter * temp = (NonCountingBloomFilter*) other_filter;
+ for(int i=0; i < m_filter_size; ++i){
+ m_filter[i] |= (*temp)[i];
+ }
+
+}
+
+void NonCountingBloomFilter::set(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 1;
+}
+
+void NonCountingBloomFilter::unset(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 0;
+}
+
+bool NonCountingBloomFilter::isSet(const Address& addr)
+{
+ int i = get_index(addr);
+ return (m_filter[i]);
+}
+
+
+int NonCountingBloomFilter::getCount(const Address& addr)
+{
+ return m_filter[get_index(addr)];
+}
+
+int NonCountingBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+void NonCountingBloomFilter::print(ostream& out) const
+{
+}
+
+int NonCountingBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+int NonCountingBloomFilter::readBit(const int index) {
+ return m_filter[index];
+}
+
+void NonCountingBloomFilter::writeBit(const int index, const int value) {
+ m_filter[index] = value;
+}
+
+int NonCountingBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubySystem::getBlockSizeBits() + m_offset,
+ RubySystem::getBlockSizeBits() + m_offset + m_filter_size_bits - 1);
+}
+
+
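// Illustrative, stand-alone sketch (not applied by this patch): the config
// string parsed above is "<filter_size>_<offset>", where the offset pushes
// the bit-select window further above the block offset. With a hypothetical
// "1024_4" and 64-byte blocks, addresses are hashed at a 1 KB granularity:

#include <cstdint>

namespace noncounting_sketch {
  const int kBlockSizeBits  = 6;   // assumed 64-byte cache lines
  const int kOffset         = 4;   // from the config string "1024_4"
  const int kFilterSizeBits = 10;  // log2(1024)

  // Equivalent to addr.bitSelect(kBlockSizeBits + kOffset,
  //                              kBlockSizeBits + kOffset + kFilterSizeBits - 1)
  inline int get_index(uint64_t paddr) {
    return static_cast<int>((paddr >> (kBlockSizeBits + kOffset)) &
                            ((1u << kFilterSizeBits) - 1));
  }
}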
diff --git a/src/mem/ruby/tester/CheckTable.hh b/src/mem/ruby/filters/NonCountingBloomFilter.hh
index a7f486315..27045ebc9 100644
--- a/src/mem/ruby/tester/CheckTable.hh
+++ b/src/mem/ruby/filters/NonCountingBloomFilter.hh
@@ -28,66 +28,61 @@
*/
/*
- * $Id$
+ * NonCountingBloomFilter.hh
*
* Description:
*
+ *
*/
-#ifndef CHECKTABLE_H
-#define CHECKTABLE_H
+#ifndef NONCOUNTING_BLOOM_FILTER_H
+#define NONCOUNTING_BLOOM_FILTER_H
+#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
-#include "mem/gems_common/Vector.hh"
-
-class Address;
-class Check;
-template <class KEY_TYPE, class VALUE_TYPE> class Map;
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/filters/AbstractBloomFilter.hh"
-class CheckTable {
+class NonCountingBloomFilter : public AbstractBloomFilter {
public:
- // Constructors
- CheckTable();
- // Destructor
- ~CheckTable();
+ ~NonCountingBloomFilter();
+ NonCountingBloomFilter(string config);
- // Public Methods
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
- Check* getRandomCheck();
- Check* getCheck(const Address& address);
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
- // bool isPresent(const Address& address) const;
- // void removeCheckFromTable(const Address& address);
- // bool isTableFull() const;
- // Need a method to select a check or retrieve a check
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
void print(ostream& out) const;
-private:
- // Private Methods
- void addCheck(const Address& address);
- // Private copy constructor and assignment operator
- CheckTable(const CheckTable& obj);
- CheckTable& operator=(const CheckTable& obj);
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
- // Data Members (m_ prefix)
- Vector<Check*> m_check_vector;
- Map<Address, Check*>* m_lookup_map_ptr;
-};
+private:
+
+ int get_index(const Address& addr);
-// Output operator declaration
-ostream& operator<<(ostream& out, const CheckTable& obj);
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_offset;
+ int m_filter_size_bits;
-// ******************* Definitions *******************
+ int m_count_bits;
+ int m_count;
+};
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const CheckTable& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-#endif //CHECKTABLE_H
+#endif
diff --git a/src/mem/ruby/init.cc b/src/mem/ruby/init.cc
deleted file mode 100644
index a4aefaad3..000000000
--- a/src/mem/ruby/init.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * init.C
- *
- * Description: See init.hh
- *
- * $Id$
- *
- */
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/common/Debug.hh"
-#include "mem/ruby/common/Driver.hh"
-#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/ruby/tester/Tester.hh"
-#include "mem/ruby/init.hh"
-
-using namespace std;
-#include <string>
-#include <map>
-#include <stdlib.h>
-
-#include "mem/gems_common/ioutil/confio.hh"
-#include "mem/gems_common/ioutil/initvar.hh"
-
-// A generated file containing the default parameters in string form
-// The defaults are stored in the variable global_default_param
-#include "mem/ruby/default_param.hh"
-
-static initvar_t *ruby_initvar_obj = NULL;
-
-//***************************************************************************
-static void init_generate_values( void )
-{
- /* update generated values, based on input configuration */
-}
-
-//***************************************************************************
-
-void init_variables( void )
-{
- // allocate the "variable initialization" package
- ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
- global_default_param,
- &init_simulator,
- &init_generate_values);
-}
-
-
- /*
-void init_variables(const char* config_str )
-{
- // allocate the "variable initialization" package
- ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
- config_str,
- &init_simulator,
- &init_generate_values );
-}
- */
-
-void init_simulator()
-{
- // Set things to NULL to make sure we don't de-reference them
- // without a seg. fault.
- g_system_ptr = NULL;
- g_debug_ptr = NULL;
- g_eventQueue_ptr = NULL;
-
- cout << "Ruby Timing Mode" << endl;
-
-
- g_debug_ptr = new Debug( DEBUG_FILTER_STRING,
- DEBUG_VERBOSITY_STRING,
- DEBUG_START_TIME,
- DEBUG_OUTPUT_FILENAME );
- RubyConfig::init();
-
- cout << "Creating event queue..." << endl;
- g_eventQueue_ptr = new RubyEventQueue;
- cout << "Creating event queue done" << endl;
-
- cout << "Creating system..." << endl;
- cout << " Processors: " << RubyConfig::numberOfProcessors() << endl;
-
- g_system_ptr = new RubySystem;
- cout << "Creating system done" << endl;
-
- cout << "Ruby initialization complete" << endl;
-}
-
-void init_simulator(Driver* _driver)
-{
- // Set things to NULL to make sure we don't de-reference them
- // without a seg. fault.
- g_system_ptr = NULL;
- g_debug_ptr = NULL;
- g_eventQueue_ptr = NULL;
-
- cout << "Ruby Timing Mode" << endl;
-
-
- g_debug_ptr = new Debug( DEBUG_FILTER_STRING,
- DEBUG_VERBOSITY_STRING,
- DEBUG_START_TIME,
- DEBUG_OUTPUT_FILENAME );
- RubyConfig::init();
-
- cout << "Creating event queue..." << endl;
- g_eventQueue_ptr = new RubyEventQueue;
- cout << "Creating event queue done" << endl;
-
- cout << "Creating system..." << endl;
- cout << " Processors: " << RubyConfig::numberOfProcessors() << endl;
-
- g_system_ptr = new RubySystem(_driver);
- cout << "Creating system done" << endl;
-
- cout << "Ruby initialization complete" << endl;
-}
-
-void destroy_simulator()
-{
- cout << "Deleting system..." << endl;
- delete g_system_ptr;
- cout << "Deleting system done" << endl;
-
- cout << "Deleting event queue..." << endl;
- delete g_eventQueue_ptr;
- cout << "Deleting event queue done" << endl;
-
- delete g_debug_ptr;
-}
-
-/*-------------------------------------------------------------------------+
- | DG: These are the external load and unload hooks that will be called by |
- | M5 in phase 1 integration, and possibly afterwards, too. |
- +-------------------------------------------------------------------------*/
-
-//dsm: superfluous
-/*extern "C"
-int OnLoadRuby() {
- init_variables();
- return 0;
-}
-
-extern "C"
-int OnInitRuby() {
- init_simulator();
- return 0;
-}
-
-extern "C"
-int OnUnloadRuby() {
- destroy_simulator();
- return 0;
-}*/
-
-/* I have to put it somewhere for now */
-void tester_main(int argc, char **argv) {
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented." << std::endl;
-}
diff --git a/src/mem/ruby/libruby.cc b/src/mem/ruby/libruby.cc
new file mode 100644
index 000000000..4083a888c
--- /dev/null
+++ b/src/mem/ruby/libruby.cc
@@ -0,0 +1,206 @@
+
+#include <sys/wait.h>
+#include <algorithm>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <unistd.h>
+
+#include "mem/ruby/libruby_internal.hh"
+#include "mem/ruby/system/RubyPort.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include "mem/ruby/system/MemoryVector.hh"
+#include "mem/ruby/common/Address.hh"
+
+string RubyRequestType_to_string(const RubyRequestType& obj)
+{
+ switch(obj) {
+ case RubyRequestType_IFETCH:
+ return "IFETCH";
+ case RubyRequestType_LD:
+ return "LD";
+ case RubyRequestType_ST:
+ return "ST";
+ case RubyRequestType_RMW:
+ return "RMW";
+ case RubyRequestType_NULL:
+ default:
+ assert(0);
+ return "";
+ }
+}
+
+RubyRequestType string_to_RubyRequestType(std::string str)
+{
+ if (str == "IFETCH")
+ return RubyRequestType_IFETCH;
+ else if (str == "LD")
+ return RubyRequestType_LD;
+ else if (str == "ST")
+ return RubyRequestType_ST;
+ else if (str == "RMW")
+ return RubyRequestType_RMW;
+ else
+ assert(0);
+ return RubyRequestType_NULL;
+}
+
+ostream& operator<<(ostream& out, const RubyRequestType& obj)
+{
+ cerr << "in op" << endl;
+ out << RubyRequestType_to_string(obj);
+ cerr << "flushing" << endl;
+ out << flush;
+ cerr << "done" << endl;
+ return out;
+}
+
+vector<string> tokenizeString(string str, string delims)
+{
+ vector<string> tokens;
+ char* pch;
+ char* tmp;
+ const char* c_delims = delims.c_str();
+ tmp = new char[str.length()+1];
+ strcpy(tmp, str.c_str());
+ pch = strtok(tmp, c_delims);
+ while (pch != NULL) {
+ string tmp_str(pch);
+ if (tmp_str == "null") tmp_str = "";
+ tokens.push_back(tmp_str);
+
+ pch = strtok(NULL, c_delims);
+ }
+ delete [] tmp;
+ return tokens;
+}
+
+void libruby_init(const char* cfg_filename)
+{
+ stringstream cfg_output;
+
+ // first we execute the Ruby-lang configuration script
+ int fd[2];
+ int pid;
+ if (pipe(fd) == -1) {
+ perror("Error Creating Pipe");
+ exit(EXIT_FAILURE);
+ }
+
+ pid = fork();
+ if (pid == -1){
+ perror("Error forking");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!pid) {
+ // child
+ close(fd[0]); // close the read end of the pipe
+ // replace stdout with the write pipe
+ if (dup2(fd[1], STDOUT_FILENO) == -1) {
+ perror("Error redirecting stdout");
+ exit(EXIT_FAILURE);
+ }
+#define QUOTE_MACRO(x, y) QUOTE_TXT(x,y)
+#define QUOTE_TXT(x, y) #x y
+ if (execlp("ruby", "ruby", "-I", QUOTE_MACRO(GEMS_ROOT, "/ruby/config"), QUOTE_MACRO(GEMS_ROOT, "/ruby/config/print_cfg.rb"), "-r", cfg_filename, NULL)) {
+ perror("execlp");
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ close(fd[1]);
+
+ int child_status;
+ if (wait(&child_status) == -1) {
+ perror("wait");
+ exit(EXIT_FAILURE);
+ }
+ if (child_status != EXIT_SUCCESS) {
+ exit(EXIT_FAILURE);
+ }
+
+ char buf[100];
+ int bytes_read;
+ while( (bytes_read = read(fd[0], buf, 100)) > 0 ) {
+ for (int i=0;i<bytes_read;i++) {
+ // cout << buf[i];
+ cfg_output << buf[i];
+ }
+ }
+ assert(bytes_read == 0);
+ close(fd[0]);
+ }
+
+ vector<RubyObjConf> * sys_conf = new vector<RubyObjConf>;
+
+ string line;
+ getline(cfg_output, line) ;
+ while ( !cfg_output.eof() ) {
+ vector<string> tokens = tokenizeString(line, " ");
+ assert(tokens.size() >= 2);
+ vector<string> argv;
+ for (size_t i=2; i<tokens.size(); i++) {
+ std::replace(tokens[i].begin(), tokens[i].end(), '%', ' ');
+ std::replace(tokens[i].begin(), tokens[i].end(), '#', '\n');
+ argv.push_back(tokens[i]);
+ }
+ sys_conf->push_back(RubyObjConf(tokens[0], tokens[1], argv));
+ tokens.clear();
+ argv.clear();
+ getline(cfg_output, line);
+ }
+
+ RubySystem::create(*sys_conf);
+ delete sys_conf;
+}
+
+RubyPortHandle libruby_get_port(const char* port_name, void (*hit_callback)(int64_t access_id))
+{
+ return static_cast<RubyPortHandle>(RubySystem::getPort(port_name, hit_callback));
+}
+
+RubyPortHandle libruby_get_port_by_name(const char* port_name)
+{
+ return static_cast<RubyPortHandle>(RubySystem::getPortOnly(port_name));
+}
+
+void libruby_write_ram(uint64_t paddr, uint8_t* data, int len)
+{
+ RubySystem::getMemoryVector()->write(Address(paddr), data, len);
+}
+
+void libruby_read_ram(uint64_t paddr, uint8_t* data, int len)
+{
+ RubySystem::getMemoryVector()->read(Address(paddr), data, len);
+}
+
+int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request)
+{
+ return static_cast<RubyPort*>(p)->makeRequest(request);
+}
+
+int libruby_tick(int n)
+{
+ RubySystem::getEventQueue()->triggerEvents(RubySystem::getEventQueue()->getTime() + n);
+ return 0;
+}
+
+void libruby_destroy()
+{
+}
+
+const char* libruby_last_error()
+{
+ return "";
+}
+
+void libruby_print_config(std::ostream & out)
+{
+ RubySystem::printConfig(out);
+}
+
+void libruby_print_stats(std::ostream & out)
+{
+ RubySystem::printStats(out);
+}
+
+uint64_t libruby_get_time() {
+ return RubySystem::getCycleCount(0);
+}
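// Illustrative sketch (not applied by this patch): libruby_init() above pipes
// the output of print_cfg.rb back into the simulator and expects one object
// per line, "<ObjectType> <object_name> <arg> <arg> ...", with '%' standing
// for a space and '#' for a newline inside an argument. A hypothetical line
// and the RubyObjConf the parsing loop would build from it (the type, name
// and parameters are examples only, not taken from the real config scripts):
//
//   input line : CacheMemory L1Cache_0 size 32768 assoc 4
//   decoded as : RubyObjConf("CacheMemory", "L1Cache_0",
//                            {"size", "32768", "assoc", "4"})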
diff --git a/src/mem/ruby/libruby.hh b/src/mem/ruby/libruby.hh
new file mode 100644
index 000000000..5916c98e6
--- /dev/null
+++ b/src/mem/ruby/libruby.hh
@@ -0,0 +1,109 @@
+
+#ifndef LIBRUBY_H
+#define LIBRUBY_H
+
+#include <stdint.h>
+#include <ostream>
+
+typedef void* RubyPortHandle;
+enum RubyRequestType {
+ RubyRequestType_NULL,
+ RubyRequestType_IFETCH,
+ RubyRequestType_LD,
+ RubyRequestType_ST,
+ RubyRequestType_RMW
+};
+
+enum RubyAccessMode {
+ RubyAccessMode_User,
+ RubyAccessMode_Supervisor,
+ RubyAccessMode_Device
+};
+
+struct RubyRequest {
+ uint64_t paddr;
+ uint8_t* data;
+ int len;
+ uint64_t pc;
+ RubyRequestType type;
+ RubyAccessMode access_mode;
+
+ RubyRequest() {}
+ RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode)
+ : paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type), access_mode(_access_mode)
+ {}
+};
+
+/**
+ * Initialize the system. cfg_file is a Ruby-lang configuration script
+ */
+void libruby_init(const char* cfg_file);
+
+/**
+ * Tear down a configured system. Must be invoked after a call to libruby_init.
+ */
+void libruby_destroy();
+
+/**
+ * Print the last error encountered by ruby. Currently unimplemented.
+ */
+const char* libruby_last_error();
+
+/**
+ * Retrieve a handle to a RubyPort object, identified by name in the
+ * configuration. You also pass in the callback function you want
+ * this port to use when a request completes. Only one handle to a
+ * port is allowed at a time.
+ */
+RubyPortHandle libruby_get_port(const char* name, void (*hit_callback)(int64_t access_id));
+
+/**
+ * Retrieve a handle to a RubyPort object, identified by name in the
+ * configuration.
+ */
+RubyPortHandle libruby_get_port_by_name(const char* name);
+
+
+/**
+ * issue_request returns a unique access_id to identify the ruby
+ * transaction. This access_id is later returned to the caller via
+ * hit_callback (passed to libruby_get_port)
+ */
+int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request);
+
+/**
+ * writes data directly into Ruby's data array. Note that this
+ * ignores caches, and should be considered incoherent after
+ * simulation starts.
+ */
+void libruby_write_ram(uint64_t paddr, uint8_t * data, int len);
+
+/**
+ * reads data directly from Ruby's data array. Note that this
+ * ignores caches, and should be considered incoherent after
+ * simulation starts.
+ */
+void libruby_read_ram(uint64_t paddr, uint8_t * data, int len);
+
+/**
+ * tick the system n cycles. Eventually this will return the number of
+ * cycles until the next event, but for now it always returns 0.
+ */
+int libruby_tick(int n);
+
+/**
+ * Print the Ruby system configuration to the given stream.
+ */
+void libruby_print_config(std::ostream & out);
+
+/**
+ * Print Ruby's statistics to the given stream.
+ */
+void libruby_print_stats(std::ostream & out);
+
+
+/**
+ * Get the current Ruby time (cycle count).
+ */
+uint64_t libruby_get_time();
+#endif
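// Illustrative usage sketch (not applied by this patch) for the interface
// declared above. The config file name, port name and request values are
// assumptions for this example only:

#include <iostream>
#include "mem/ruby/libruby.hh"

static void hit_callback(int64_t access_id) {
  // Invoked by Ruby when the request identified by access_id completes.
}

int libruby_usage_sketch() {
  libruby_init("ruby_config.rb");                     // hypothetical cfg script
  RubyPortHandle port = libruby_get_port("core0", hit_callback);

  uint8_t data[8] = {0};
  RubyRequest req(0x1000 /*paddr*/, data, sizeof(data),
                  0x400000 /*pc*/, RubyRequestType_LD, RubyAccessMode_User);
  int64_t id = libruby_issue_request(port, req);      // completion via callback
  (void) id;

  libruby_tick(100);                                  // advance 100 cycles
  libruby_print_stats(std::cout);
  libruby_destroy();
  return 0;
}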
diff --git a/src/mem/ruby/libruby_internal.hh b/src/mem/ruby/libruby_internal.hh
new file mode 100644
index 000000000..efa855c82
--- /dev/null
+++ b/src/mem/ruby/libruby_internal.hh
@@ -0,0 +1,13 @@
+#ifndef LIBRUBY_INTERNAL_H
+#define LIBRUBY_INTERNAL_H
+
+#include "mem/ruby/libruby.hh"
+
+#include <ostream>
+#include <string>
+
+std::string RubyRequestType_to_string(const RubyRequestType& obj);
+RubyRequestType string_to_RubyRequestType(std::string);
+std::ostream& operator<<(std::ostream& out, const RubyRequestType& obj);
+
+#endif
diff --git a/src/mem/ruby/network/Network.cc b/src/mem/ruby/network/Network.cc
new file mode 100644
index 000000000..cb3507471
--- /dev/null
+++ b/src/mem/ruby/network/Network.cc
@@ -0,0 +1,34 @@
+
+#include "mem/protocol/MachineType.hh"
+#include "mem/ruby/network/Network.hh"
+
+Network::Network(const string & name)
+ : m_name(name)
+{
+ m_virtual_networks = 0;
+ m_topology_ptr = NULL;
+}
+
+void Network::init(const vector<string> & argv)
+{
+ m_nodes = MachineType_base_number(MachineType_NUM); // Total nodes in network
+
+ for (size_t i=0; i<argv.size(); i+=2) {
+ if (argv[i] == "number_of_virtual_networks")
+ m_virtual_networks = atoi(argv[i+1].c_str());
+ else if (argv[i] == "topology")
+ m_topology_ptr = RubySystem::getTopology(argv[i+1]);
+ else if (argv[i] == "buffer_size")
+ m_buffer_size = atoi(argv[i+1].c_str());
+ else if (argv[i] == "endpoint_bandwidth")
+ m_endpoint_bandwidth = atoi(argv[i+1].c_str());
+ else if (argv[i] == "adaptive_routing")
+ m_adaptive_routing = (argv[i+1]=="true");
+ else if (argv[i] == "link_latency")
+ m_link_latency = atoi(argv[i+1].c_str());
+
+ }
+ assert(m_virtual_networks != 0);
+ assert(m_topology_ptr != NULL);
+// printf ("HERE \n");
+}
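// Illustrative sketch (not applied by this patch): Network::init() above
// consumes its configuration as a flat key/value vector. A hypothetical argv
// matching the keys it parses (the values are examples only; in practice they
// come from the Ruby-lang configuration scripts):

#include <string>
#include <vector>

std::vector<std::string> example_network_argv() {
  std::vector<std::string> argv;
  argv.push_back("number_of_virtual_networks"); argv.push_back("5");
  argv.push_back("topology");                   argv.push_back("PtToPt");
  argv.push_back("buffer_size");                argv.push_back("4");
  argv.push_back("endpoint_bandwidth");         argv.push_back("1000");
  argv.push_back("adaptive_routing");           argv.push_back("false");
  argv.push_back("link_latency");               argv.push_back("1");
  return argv;
}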
diff --git a/src/mem/ruby/network/Network.hh b/src/mem/ruby/network/Network.hh
index d3bfa59da..5730d6591 100644
--- a/src/mem/ruby/network/Network.hh
+++ b/src/mem/ruby/network/Network.hh
@@ -49,22 +49,29 @@
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/protocol/MessageSizeType.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/config/RubyConfig.hh"
class NetDest;
class MessageBuffer;
class Throttle;
+class Topology;
class Network {
public:
// Constructors
- Network() {}
+ Network(const string & name);
+ virtual void init(const vector<string> & argv);
// Destructor
virtual ~Network() {}
// Public Methods
-
- static Network* createNetwork(int nodes);
+ int getBufferSize() { return m_buffer_size; }
+ int getNumberOfVirtualNetworks() { return m_virtual_networks; }
+ int getEndpointBandwidth() { return m_endpoint_bandwidth; }
+ bool getAdaptiveRouting() {return m_adaptive_routing; }
+ int getLinkLatency() { return m_link_latency; }
// returns the queue requested for the given component
virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered, int netNumber) = 0;
@@ -84,7 +91,7 @@ public:
virtual void printConfig(ostream& out) const = 0;
virtual void print(ostream& out) const = 0;
-private:
+protected:
// Private Methods
// Private copy constructor and assignment operator
@@ -92,6 +99,15 @@ private:
Network& operator=(const Network& obj);
// Data Members (m_ prefix)
+protected:
+ const string m_name;
+ int m_nodes;
+ int m_virtual_networks;
+ int m_buffer_size;
+ int m_endpoint_bandwidth;
+ Topology* m_topology_ptr;
+ bool m_adaptive_routing;
+ int m_link_latency;
};
// Output operator declaration
@@ -110,7 +126,7 @@ ostream& operator<<(ostream& out, const Network& obj)
// Code to map network message size types to an integer number of bytes
const int CONTROL_MESSAGE_SIZE = 8;
-const int DATA_MESSAGE_SIZE = (64+8);
+const int DATA_MESSAGE_SIZE = (RubySystem::getBlockSizeBytes()+8);
extern inline
int MessageSizeType_to_int(MessageSizeType size_type)
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh
index 5e68198cc..387ed0bc1 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh
@@ -11,7 +11,7 @@
class CreditLink_d : public NetworkLink_d {
public:
- CreditLink_d(int id):NetworkLink_d(id) {}
+ CreditLink_d(int id, int link_latency, GarnetNetwork_d *net_ptr):NetworkLink_d(id, link_latency, net_ptr) {}
};
#endif
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc
index 988b634ce..51393b576 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.cc
@@ -43,10 +43,19 @@
#include "mem/ruby/network/garnet-fixed-pipeline/CreditLink_d.hh"
#include "mem/ruby/common/NetDest.hh"
-GarnetNetwork_d::GarnetNetwork_d(int nodes)
+GarnetNetwork_d::GarnetNetwork_d(const string & name)
+ : Network(name)
{
- m_nodes = MachineType_base_number(MachineType_NUM); // Total nodes in network
- m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS; // Number of virtual networks = number of message classes in the coherence protocol
+}
+
+void GarnetNetwork_d::init(const vector<string> & argv)
+{
+ Network::init(argv);
+
+ //added by SS
+ m_network_config_ptr = new NetworkConfig;
+ m_network_config_ptr->init(argv);
+
m_ruby_start = 0;
m_flits_recieved = 0;
m_flits_injected = 0;
@@ -80,7 +89,7 @@ GarnetNetwork_d::GarnetNetwork_d(int nodes)
}
// Setup the network switches
- m_topology_ptr = new Topology(this, m_nodes);
+ m_topology_ptr->makeTopology();
int number_of_routers = m_topology_ptr->numSwitches();
for (int i=0; i<number_of_routers; i++) {
@@ -138,7 +147,7 @@ void GarnetNetwork_d::makeInLink(NodeID src, SwitchID dest, const NetDest& routi
if(!isReconfiguration)
{
NetworkLink_d *net_link = new NetworkLink_d(m_link_ptr_vector.size(), link_latency, this);
- CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size());
+ CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size(), link_latency, this);
m_link_ptr_vector.insertAtBottom(net_link);
m_creditlink_ptr_vector.insertAtBottom(credit_link);
@@ -167,7 +176,7 @@ void GarnetNetwork_d::makeOutLink(SwitchID src, NodeID dest, const NetDest& rout
if(!isReconfiguration)
{
NetworkLink_d *net_link = new NetworkLink_d(m_link_ptr_vector.size(), link_latency, this);
- CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size());
+ CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size(), link_latency, this);
m_link_ptr_vector.insertAtBottom(net_link);
m_creditlink_ptr_vector.insertAtBottom(credit_link);
@@ -190,7 +199,7 @@ void GarnetNetwork_d::makeInternalLink(SwitchID src, SwitchID dest, const NetDes
if(!isReconfiguration)
{
NetworkLink_d *net_link = new NetworkLink_d(m_link_ptr_vector.size(), link_latency, this);
- CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size());
+ CreditLink_d *credit_link = new CreditLink_d(m_creditlink_ptr_vector.size(), link_latency, this);
m_link_ptr_vector.insertAtBottom(net_link);
m_creditlink_ptr_vector.insertAtBottom(credit_link);
@@ -241,9 +250,9 @@ Time GarnetNetwork_d::getRubyStartTime()
void GarnetNetwork_d::printStats(ostream& out) const
{ double average_link_utilization = 0;
Vector<double > average_vc_load;
- average_vc_load.setSize(m_virtual_networks*NetworkConfig::getVCsPerClass());
+ average_vc_load.setSize(m_virtual_networks*m_network_config_ptr->getVCsPerClass());
- for(int i = 0; i < m_virtual_networks*NetworkConfig::getVCsPerClass(); i++)
+ for(int i = 0; i < m_virtual_networks*m_network_config_ptr->getVCsPerClass(); i++)
{
average_vc_load[i] = 0;
}
@@ -259,7 +268,7 @@ void GarnetNetwork_d::printStats(ostream& out) const
Vector<int > vc_load = m_link_ptr_vector[i]->getVcLoad();
for(int j = 0; j < vc_load.size(); j++)
{
- assert(vc_load.size() == NetworkConfig::getVCsPerClass()*m_virtual_networks);
+ assert(vc_load.size() == m_network_config_ptr->getVCsPerClass()*m_virtual_networks);
average_vc_load[j] += vc_load[j];
}
}
@@ -267,7 +276,7 @@ void GarnetNetwork_d::printStats(ostream& out) const
out << "Average Link Utilization :: " << average_link_utilization << " flits/cycle" << endl;
out << "-------------" << endl;
- for(int i = 0; i < NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ for(int i = 0; i < m_network_config_ptr->getVCsPerClass()*m_virtual_networks; i++)
{
average_vc_load[i] = (double(average_vc_load[i]) / (double(g_eventQueue_ptr->getTime()) - m_ruby_start));
out << "Average VC Load [" << i << "] = " << average_vc_load[i] << " flits/cycle " << endl;
@@ -304,7 +313,7 @@ void GarnetNetwork_d::printConfig(ostream& out) const
out << "Network Configuration" << endl;
out << "---------------------" << endl;
out << "network: GarnetNetwork_d" << endl;
- out << "topology: " << g_NETWORK_TOPOLOGY << endl;
+ out << "topology: " << m_topology_ptr->getName() << endl;
out << endl;
for (int i = 0; i < m_virtual_networks; i++)
@@ -337,10 +346,7 @@ void GarnetNetwork_d::printConfig(ostream& out) const
{
m_router_ptr_vector[i]->printConfig(out);
}
- if (g_PRINT_TOPOLOGY)
- {
- m_topology_ptr->printConfig(out);
- }
+ m_topology_ptr->printConfig(out);
}
void GarnetNetwork_d::print(ostream& out) const
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh
index e0f7aebd9..f4b809443 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh
@@ -51,10 +51,15 @@ class CreditLink_d;
class GarnetNetwork_d : public Network{
public:
- GarnetNetwork_d(int nodes);
+ GarnetNetwork_d(const string & name);
~GarnetNetwork_d();
+ void init(const vector<string> & argv);
+
+ //added by SS
+ NetworkConfig* getNetworkConfig() { return m_network_config_ptr; }
+
int getNumNodes(){ return m_nodes;}
// returns the queue requested for the given component
@@ -99,6 +104,8 @@ public:
void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
private:
+ NetworkConfig* m_network_config_ptr;
+
void checkNetworkAllocation(NodeID id, bool ordered, int network_num);
// Private copy constructor and assignment operator
@@ -106,8 +113,8 @@ private:
GarnetNetwork_d& operator=(const GarnetNetwork_d& obj);
/***********Data Members*************/
- int m_virtual_networks;
- int m_nodes;
+// int m_virtual_networks;
+// int m_nodes;
int m_flits_recieved, m_flits_injected;
double m_network_latency, m_queueing_latency;
@@ -122,7 +129,7 @@ private:
Vector<CreditLink_d *> m_creditlink_ptr_vector; // All links in the network
Vector<NetworkInterface_d *> m_ni_ptr_vector; // All NI's in Network
- Topology* m_topology_ptr;
+// Topology* m_topology_ptr;
Time m_ruby_start;
};
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc
index 77857b1f8..f75997757 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkInterface_d.cc
@@ -43,7 +43,7 @@ NetworkInterface_d::NetworkInterface_d(int id, int virtual_networks, GarnetNetwo
m_id = id;
m_net_ptr = network_ptr;
m_virtual_networks = virtual_networks;
- m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+ m_vc_per_vnet = m_net_ptr->getNetworkConfig()->getVCsPerClass();
m_num_vcs = m_vc_per_vnet*m_virtual_networks;
m_vc_round_robin = 0;
@@ -66,7 +66,7 @@ NetworkInterface_d::NetworkInterface_d(int id, int virtual_networks, GarnetNetwo
for(int i = 0; i < m_num_vcs; i++)
{
- m_out_vc_state.insertAtBottom(new OutVcState_d(i));
+ m_out_vc_state.insertAtBottom(new OutVcState_d(i, m_net_ptr));
m_out_vc_state[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
}
}
@@ -114,7 +114,7 @@ bool NetworkInterface_d::flitisizeMessage(MsgPtr msg_ptr, int vnet)
NetDest net_msg_dest = net_msg_ptr->getInternalDestination();
Vector<NodeID> dest_nodes = net_msg_dest.getAllDest(); // gets all the destinations associated with this message.
- int num_flits = (int) ceil((double) MessageSizeType_to_int(net_msg_ptr->getMessageSize())/NetworkConfig::getFlitSize() ); // Number of flits is dependent on the link bandwidth available. This is expressed in terms of bytes/cycle or the flit size
+ int num_flits = (int) ceil((double) MessageSizeType_to_int(net_msg_ptr->getMessageSize())/m_net_ptr->getNetworkConfig()->getFlitSize() ); // Number of flits is dependent on the link bandwidth available. This is expressed in terms of bytes/cycle or the flit size
for(int ctr = 0; ctr < dest_nodes.size(); ctr++) // loop because we will be converting all multicast messages into unicast messages
{
@@ -221,7 +221,7 @@ void NetworkInterface_d::wakeup()
if(t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_)
{
free_signal = true;
- if(!NetworkConfig::isNetworkTesting()) // When we are doing network only testing, the messages do not have to be buffered into the message buffers
+ if(!m_net_ptr->getNetworkConfig()->isNetworkTesting()) // When we are doing network only testing, the messages do not have to be buffered into the message buffers
{
outNode_ptr[t_flit->get_vnet()]->enqueue(t_flit->get_msg_ptr(), 1); // enqueueing for protocol buffer. This is not required when doing network only testing
}
@@ -307,7 +307,7 @@ void NetworkInterface_d::scheduleOutputLink()
int NetworkInterface_d::get_vnet(int vc)
{
- for(int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ for(int i = 0; i < m_net_ptr->getNumberOfVirtualNetworks(); i++)
{
if(vc >= (i*m_vc_per_vnet) && vc < ((i+1)*m_vc_per_vnet))
{
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc
index 94f721646..8382d331f 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.cc
@@ -37,6 +37,7 @@
#include "mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh"
#include "mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh"
+/*
NetworkLink_d::NetworkLink_d(int id)
{
m_id = id;
@@ -45,12 +46,12 @@ NetworkLink_d::NetworkLink_d(int id)
linkBuffer = new flitBuffer_d();
m_link_utilized = 0;
- m_vc_load.setSize(NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS);
+ m_vc_load.setSize(NetworkConfig::getVCsPerClass()*RubySystem::getNetwork()->getNumberOfVirtualNetworks());
- for(int i = 0; i < NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ for(int i = 0; i < NetworkConfig::getVCsPerClass()*RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++)
m_vc_load[i] = 0;
}
-
+*/
NetworkLink_d::NetworkLink_d(int id, int link_latency, GarnetNetwork_d *net_ptr)
{
m_net_ptr = net_ptr;
@@ -58,9 +59,9 @@ NetworkLink_d::NetworkLink_d(int id, int link_latency, GarnetNetwork_d *net_ptr)
m_latency = link_latency;
linkBuffer = new flitBuffer_d();
m_link_utilized = 0;
- m_vc_load.setSize(NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS);
+ m_vc_load.setSize(m_net_ptr->getNetworkConfig()->getVCsPerClass()*net_ptr->getNumberOfVirtualNetworks());
- for(int i = 0; i < NetworkConfig::getVCsPerClass()*NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ for(int i = 0; i < m_net_ptr->getNetworkConfig()->getVCsPerClass()*net_ptr->getNumberOfVirtualNetworks(); i++)
m_vc_load[i] = 0;
}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh
index 1e81a565b..90fb9f6dc 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/NetworkLink_d.hh
@@ -46,7 +46,7 @@ class GarnetNetwork_d;
class NetworkLink_d : public Consumer {
public:
- NetworkLink_d(int id);
+ //NetworkLink_d(int id);
~NetworkLink_d();
NetworkLink_d(int id, int link_latency, GarnetNetwork_d *net_ptr);
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc
index 4fd040099..69e3ae377 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.cc
@@ -27,7 +27,7 @@
*/
/*
- * OutVCState_d.C
+ * OutVCState_d.cc
*
* Niket Agarwal, Princeton University
*
@@ -37,10 +37,11 @@
#include "mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-OutVcState_d::OutVcState_d(int id)
+OutVcState_d::OutVcState_d(int id, GarnetNetwork_d *network_ptr)
{
+ m_network_ptr = network_ptr;
m_id = id;
m_vc_state = IDLE_;
m_time = g_eventQueue_ptr->getTime();
- m_credit_count = NetworkConfig::getBufferSize();
+ m_credit_count = m_network_ptr->getNetworkConfig()->getBufferSize();
}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh
index 959a3d643..dc64b8504 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutVcState_d.hh
@@ -37,10 +37,11 @@
#define OUT_VC_STATE_D_H
#include "mem/ruby/network/garnet-fixed-pipeline/NetworkHeader.hh"
+#include "mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh"
class OutVcState_d {
public:
- OutVcState_d(int id);
+ OutVcState_d(int id, GarnetNetwork_d *network_ptr);
int get_inport() {return m_in_port; }
int get_invc() { return m_in_vc; }
@@ -75,6 +76,7 @@ public:
}
private:
+ GarnetNetwork_d *m_network_ptr;
int m_id ;
Time m_time;
VC_state_type m_vc_state;
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc
index 1b8b8097b..eb2450897 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/OutputUnit_d.cc
@@ -27,7 +27,7 @@
*/
/*
- * OutputUnit_d.C
+ * OutputUnit_d.cc
*
* Niket Agarwal, Princeton University
*
@@ -46,7 +46,7 @@ OutputUnit_d::OutputUnit_d(int id, Router_d *router)
for(int i = 0; i < m_num_vcs; i++)
{
- m_outvc_state.insertAtBottom(new OutVcState_d(i));
+ m_outvc_state.insertAtBottom(new OutVcState_d(i, m_router->get_net_ptr()));
}
}
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc
index c59805b48..161e6ecff 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/Router_d.cc
@@ -48,10 +48,10 @@ Router_d::Router_d(int id, GarnetNetwork_d *network_ptr)
{
m_id = id;
m_network_ptr = network_ptr;
- m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS;
- m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+ m_virtual_networks = network_ptr->getNumberOfVirtualNetworks();
+ m_vc_per_vnet = m_network_ptr->getNetworkConfig()->getVCsPerClass();
m_num_vcs = m_virtual_networks*m_vc_per_vnet;
- m_flit_width = NetworkConfig::getFlitSize();
+ m_flit_width = m_network_ptr->getNetworkConfig()->getFlitSize();
m_routing_unit = new RoutingUnit_d(this);
m_vc_alloc = new VCallocator_d(this);
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc
index 7ca74244e..dd0378305 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/SWallocator_d.cc
@@ -207,7 +207,7 @@ void SWallocator_d::check_for_wakeup()
int SWallocator_d::get_vnet(int invc)
{
- for(int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ for(int i = 0; i < RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++)
{
if(invc >= (i*m_vc_per_vnet) && invc < ((i+1)*m_vc_per_vnet))
{
diff --git a/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc b/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc
index 6f13ba14f..810aea175 100644
--- a/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc
+++ b/src/mem/ruby/network/garnet-fixed-pipeline/VCallocator_d.cc
@@ -244,7 +244,7 @@ void VCallocator_d::arbitrate_outvcs()
int VCallocator_d::get_vnet(int invc)
{
- for(int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++)
+ for(int i = 0; i < RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++)
{
if(invc >= (i*m_vc_per_vnet) && invc < ((i+1)*m_vc_per_vnet))
{
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc
index 2496cd30f..e56f5b5e8 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.cc
@@ -44,26 +44,22 @@
#include "mem/ruby/network/garnet-flexible-pipeline/NetworkLink.hh"
#include "mem/ruby/common/NetDest.hh"
-// calls new to abstract away from the network
-Network* Network::createNetwork(int nodes)
+GarnetNetwork::GarnetNetwork(const string & name)
+ : Network(name)
{
- NetworkConfig::readNetConfig();
- // Instantiate a network depending on what kind of network is requested
- if(NetworkConfig::isGarnetNetwork())
- {
- if(NetworkConfig::isDetailNetwork())
- return new GarnetNetwork_d(nodes);
- else
- return new GarnetNetwork(nodes);
- }
- else
- return new SimpleNetwork(nodes);
}
-GarnetNetwork::GarnetNetwork(int nodes)
+void GarnetNetwork::init(const vector<string> & argv)
{
- m_nodes = MachineType_base_number(MachineType_NUM); // Total nodes in network
- m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS; // Number of virtual networks = number of message classes in the coherence protocol
+// printf("hello\n");
+ Network::init(argv);
+//added by SS
+// assert (m_topology_ptr!=NULL);
+
+ m_network_config_ptr = new NetworkConfig;
+
+ m_network_config_ptr->init(argv);
+
m_ruby_start = 0;
// Allocate to and from queues
@@ -91,7 +87,8 @@ GarnetNetwork::GarnetNetwork(int nodes)
}
// Setup the network switches
- m_topology_ptr = new Topology(this, m_nodes);
+ assert (m_topology_ptr!=NULL);
+ m_topology_ptr->makeTopology();
int number_of_routers = m_topology_ptr->numSwitches();
for (int i=0; i<number_of_routers; i++) {
@@ -188,6 +185,7 @@ void GarnetNetwork::makeInternalLink(SwitchID src, SwitchID dest, const NetDest&
void GarnetNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
{
+ printf ("id = %i, m_nodes = %i \n", id, m_nodes);
ASSERT(id < m_nodes);
ASSERT(network_num < m_virtual_networks);
@@ -223,9 +221,9 @@ Time GarnetNetwork::getRubyStartTime()
void GarnetNetwork::printStats(ostream& out) const
{ double average_link_utilization = 0;
Vector<double > average_vc_load;
- average_vc_load.setSize(m_virtual_networks*NetworkConfig::getVCsPerClass());
+ average_vc_load.setSize(m_virtual_networks*m_network_config_ptr->getVCsPerClass());
- for(int i = 0; i < m_virtual_networks*NetworkConfig::getVCsPerClass(); i++)
+ for(int i = 0; i < m_virtual_networks*m_network_config_ptr->getVCsPerClass(); i++)
{
average_vc_load[i] = 0;
}
@@ -240,7 +238,7 @@ void GarnetNetwork::printStats(ostream& out) const
Vector<int > vc_load = m_link_ptr_vector[i]->getVcLoad();
for(int j = 0; j < vc_load.size(); j++)
{
- assert(vc_load.size() == NetworkConfig::getVCsPerClass()*m_virtual_networks);
+ assert(vc_load.size() == m_network_config_ptr->getVCsPerClass()*m_virtual_networks);
average_vc_load[j] += vc_load[j];
}
}
@@ -248,7 +246,7 @@ void GarnetNetwork::printStats(ostream& out) const
out << "Average Link Utilization :: " << average_link_utilization << " flits/cycle" <<endl;
out << "-------------" << endl;
- for(int i = 0; i < NetworkConfig::getVCsPerClass()*m_virtual_networks; i++)
+ for(int i = 0; i < m_network_config_ptr->getVCsPerClass()*m_virtual_networks; i++)
{
average_vc_load[i] = (double(average_vc_load[i]) / (double(g_eventQueue_ptr->getTime()) - m_ruby_start));
out << "Average VC Load [" << i << "] = " << average_vc_load[i] << " flits/cycle" << endl;
@@ -262,7 +260,7 @@ void GarnetNetwork::printConfig(ostream& out) const
out << "Network Configuration" << endl;
out << "---------------------" << endl;
out << "network: GARNET_NETWORK" << endl;
- out << "topology: " << g_NETWORK_TOPOLOGY << endl;
+ out << "topology: " << m_topology_ptr->getName() << endl;
out << endl;
for (int i = 0; i < m_virtual_networks; i++)
@@ -295,10 +293,7 @@ void GarnetNetwork::printConfig(ostream& out) const
{
m_router_ptr_vector[i]->printConfig(out);
}
- if (g_PRINT_TOPOLOGY)
- {
- m_topology_ptr->printConfig(out);
- }
+ m_topology_ptr->printConfig(out);
}
void GarnetNetwork::print(ostream& out) const
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh
index 27de3de07..194fef778 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh
@@ -50,10 +50,15 @@ class NetworkLink;
class GarnetNetwork : public Network{
public:
- GarnetNetwork(int nodes);
+ GarnetNetwork(const string & name);
~GarnetNetwork();
+ void init(const vector<string> & argv);
+
+ //added by SS
+ NetworkConfig* getNetworkConfig() { return m_network_config_ptr; }
+
// returns the queue requested for the given component
MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num);
MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num);
@@ -83,9 +88,10 @@ private:
GarnetNetwork(const GarnetNetwork& obj);
GarnetNetwork& operator=(const GarnetNetwork& obj);
+
/***********Data Members*************/
- int m_virtual_networks;
- int m_nodes;
+// int m_virtual_networks;
+// int m_nodes;
Vector<bool> m_in_use;
Vector<bool> m_ordered;
@@ -97,8 +103,10 @@ private:
Vector<NetworkLink *> m_link_ptr_vector; // All links in the network
Vector<NetworkInterface *> m_ni_ptr_vector; // All NI's in Network
- Topology* m_topology_ptr;
+// Topology* m_topology_ptr;
Time m_ruby_start;
+
+ NetworkConfig* m_network_config_ptr;
};
// Output operator declaration
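
The GarnetNetwork hunks above remove the old Network::createNetwork factory and replace one-shot construction with a two-phase lifecycle: a constructor that takes only a name, then init(argv), which builds the NetworkConfig and finalizes an externally created topology (assert followed by makeTopology()). The sketch below illustrates only that lifecycle; the wiring in main() is hypothetical and the classes are stand-ins, not the real Ruby types.

    #include <cassert>
    #include <string>
    #include <vector>
    using std::string; using std::vector;

    struct Topology { bool built = false; void makeTopology() { built = true; } };

    struct Network {
        explicit Network(const string& name) : m_name(name) {}       // phase 1: name only
        virtual ~Network() {}
        virtual void init(const vector<string>& argv) { (void)argv; } // phase 2: configure
        string    m_name;
        Topology* m_topology_ptr = nullptr;   // created elsewhere and handed in
    };

    struct GarnetNetwork : Network {
        explicit GarnetNetwork(const string& name) : Network(name) {}
        void init(const vector<string>& argv) override {
            Network::init(argv);
            assert(m_topology_ptr != nullptr); // topology must already be attached
            m_topology_ptr->makeTopology();    // finalized here, as in the hunk above
        }
    };

    int main() {
        Topology topo;
        GarnetNetwork net("network0");     // hypothetical name
        net.m_topology_ptr = &topo;        // hypothetical wiring done by the system
        net.init({});                      // second phase
        assert(topo.built);
        return 0;
    }
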
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
index 270fd6429..33af28a7e 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
@@ -43,14 +43,35 @@
#include "mem/ruby/config/RubyConfig.hh"
class NetworkConfig {
+ private:
+ int m_flit_size;
+ int m_number_of_pipe_stages;
+ int m_vcs_per_class;
+ int m_buffer_size;
+ bool m_using_network_testing;
public:
- static bool isGarnetNetwork() {return g_GARNET_NETWORK; }
- static bool isDetailNetwork() {return g_DETAIL_NETWORK; }
- static int isNetworkTesting() {return g_NETWORK_TESTING; }
- static int getFlitSize() {return g_FLIT_SIZE; }
- static int getNumPipeStages() {return g_NUM_PIPE_STAGES; }
- static int getVCsPerClass() {return g_VCS_PER_CLASS; }
- static int getBufferSize() {return g_BUFFER_SIZE; }
+ NetworkConfig(){}
+ void init(const vector<string> & argv) {
+ for (size_t i=0; i<argv.size(); i+=2) {
+ if (argv[i] == "flit_size")
+ m_flit_size = atoi(argv[i+1].c_str());
+ else if (argv[i] == "number_of_pipe_stages")
+ m_number_of_pipe_stages = atoi(argv[i+1].c_str());
+ else if (argv[i] == "vcs_per_class")
+ m_vcs_per_class = atoi(argv[i+1].c_str());
+ else if (argv[i] == "buffer_size")
+ m_buffer_size = atoi(argv[i+1].c_str());
+ else if (argv[i] == "using_network_testing")
+ m_using_network_testing = atoi(argv[i+1].c_str());
+ }
+ }
+// static bool isGarnetNetwork() {return RubyConfig::getUsingGarnetNetwork(); }
+// static bool isDetailNetwork() {return RubyConfig::getUsingDetailNetwork(); }
+ bool isNetworkTesting() {return m_using_network_testing; }
+ int getFlitSize() {return m_flit_size; }
+ int getNumPipeStages() {return m_number_of_pipe_stages; }
+ int getVCsPerClass() {return m_vcs_per_class; }
+ int getBufferSize() {return m_buffer_size; }
// This is no longer used. See config/rubyconfig.defaults to set Garnet parameters.
static void readNetConfig()
{
@@ -58,6 +79,9 @@ class NetworkConfig {
string filename = "network/garnet-flexible-pipeline/";
filename += NETCONFIG_DEFAULTS;
+ if (g_SIMICS) {
+ filename = "../../../ruby/"+filename;
+ }
ifstream NetconfigFile( filename.c_str(), ios::in);
if(!NetconfigFile.is_open())
{
@@ -73,19 +97,19 @@ class NetworkConfig {
getline(NetconfigFile, line, '\n');
string var = string_split(line, ':');
- if(!var.compare("g_GARNET_NETWORK"))
+ if(!var.compare("RubyConfig::getUsingGarnetNetwork()"))
{
if(!line.compare("true"))
- g_GARNET_NETWORK = true;
+ RubyConfig::getUsingGarnetNetwork() = true;
else
- g_GARNET_NETWORK = false;
+ RubyConfig::getUsingGarnetNetwork() = false;
}
- if(!var.compare("g_DETAIL_NETWORK"))
+ if(!var.compare("RubyConfig::getUsingDetailNetwork()"))
{
if(!line.compare("true"))
- g_DETAIL_NETWORK = true;
+ RubyConfig::getUsingDetailNetwork() = true;
else
- g_DETAIL_NETWORK = false;
+ RubyConfig::getUsingDetailNetwork() = false;
}
if(!var.compare("g_NETWORK_TESTING"))
{
@@ -94,27 +118,28 @@ class NetworkConfig {
else
g_NETWORK_TESTING = false;
}
- if(!var.compare("g_FLIT_SIZE"))
- g_FLIT_SIZE = atoi(line.c_str());
- if(!var.compare("g_NUM_PIPE_STAGES"))
- g_NUM_PIPE_STAGES = atoi(line.c_str());
- if(!var.compare("g_VCS_PER_CLASS"))
- g_VCS_PER_CLASS = atoi(line.c_str());
- if(!var.compare("g_BUFFER_SIZE"))
- g_BUFFER_SIZE = atoi(line.c_str());
+ if(!var.compare("RubyConfig::getFlitSize()"))
+ RubyConfig::getFlitSize() = atoi(line.c_str());
+ if(!var.compare("RubyConfig::getNumberOfPipeStages()"))
+ RubyConfig::getNumberOfPipeStages() = atoi(line.c_str());
+ if(!var.compare("RubyConfig::getVCSPerClass()"))
+ RubyConfig::getVCSPerClass() = atoi(line.c_str());
+ if(!var.compare("RubyConfig::getBufferSize()"))
+ RubyConfig::getBufferSize() = atoi(line.c_str());
}
NetconfigFile.close();
*/
/*
- cout << "g_GARNET_NETWORK = " << g_GARNET_NETWORK << endl;
- cout << "g_DETAIL_NETWORK = " << g_DETAIL_NETWORK << endl;
+ cout << "RubyConfig::getUsingGarnetNetwork() = " << RubyConfig::getUsingGarnetNetwork() << endl;
+ cout << "RubyConfig::getUsingDetailNetwork() = " << RubyConfig::getUsingDetailNetwork() << endl;
cout << "g_NETWORK_TESTING = " << g_NETWORK_TESTING << endl;
- cout << "g_FLIT_SIZE = " << g_FLIT_SIZE << endl;
- cout << "g_NUM_PIPE_STAGES = " << g_NUM_PIPE_STAGES << endl;
- cout << "g_VCS_PER_CLASS= " << g_VCS_PER_CLASS << endl;
- cout << "g_BUFFER_SIZE = " << g_BUFFER_SIZE << endl;
+ cout << "RubyConfig::getFlitSize() = " << RubyConfig::getFlitSize() << endl;
+ cout << "RubyConfig::getNumberOfPipeStages() = " << RubyConfig::getNumberOfPipeStages() << endl;
+ cout << "RubyConfig::getVCSPerClass()= " << RubyConfig::getVCSPerClass() << endl;
+ cout << "RubyConfig::getBufferSize() = " << RubyConfig::getBufferSize() << endl;
*/
}
};
+
#endif
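
NetworkConfig::init above consumes its argv vector two entries at a time as alternating key/value strings. A small usage sketch of that convention follows; the keys are the ones the hunk recognizes, while the values and the trimmed-down class are illustrative only.

    #include <cassert>
    #include <cstdlib>
    #include <string>
    #include <vector>
    using std::string; using std::vector;

    // Same argv convention as NetworkConfig::init: {key, value, key, value, ...}
    struct NetworkConfigSketch {
        int  m_flit_size = 0;
        int  m_buffer_size = 0;
        bool m_using_network_testing = false;
        void init(const vector<string>& argv) {
            for (size_t i = 0; i + 1 < argv.size(); i += 2) {
                if      (argv[i] == "flit_size")             m_flit_size = atoi(argv[i+1].c_str());
                else if (argv[i] == "buffer_size")           m_buffer_size = atoi(argv[i+1].c_str());
                else if (argv[i] == "using_network_testing") m_using_network_testing = atoi(argv[i+1].c_str());
            }
        }
    };

    int main() {
        NetworkConfigSketch cfg;
        cfg.init({"flit_size", "16", "buffer_size", "4", "using_network_testing", "0"});
        assert(cfg.m_flit_size == 16 && cfg.m_buffer_size == 4 && !cfg.m_using_network_testing);
        return 0;
    }
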
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc
index b7bd2393c..119f064d3 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkInterface.cc
@@ -27,7 +27,7 @@
*/
/*
- * NetworkInterface.C
+ * NetworkInterface.cc
*
* Niket Agarwal, Princeton University
*
@@ -43,7 +43,7 @@ NetworkInterface::NetworkInterface(int id, int virtual_networks, GarnetNetwork *
m_id = id;
m_net_ptr = network_ptr;
m_virtual_networks = virtual_networks;
- m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+ m_vc_per_vnet = m_net_ptr->getNetworkConfig()->getVCsPerClass();
m_num_vcs = m_vc_per_vnet*m_virtual_networks;
m_vc_round_robin = 0;
@@ -109,7 +109,7 @@ bool NetworkInterface::flitisizeMessage(MsgPtr msg_ptr, int vnet)
NetworkMessage *net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
NetDest net_msg_dest = net_msg_ptr->getInternalDestination();
Vector<NodeID> dest_nodes = net_msg_dest.getAllDest(); // gets all the destinations associated with this message.
- int num_flits = (int) ceil((double) MessageSizeType_to_int(net_msg_ptr->getMessageSize())/NetworkConfig::getFlitSize() ); // Number of flits is dependent on the link bandwidth available. This is expressed in terms of bytes/cycle or the flit size
+ int num_flits = (int) ceil((double) MessageSizeType_to_int(net_msg_ptr->getMessageSize())/m_net_ptr->getNetworkConfig()->getFlitSize() ); // Number of flits is dependent on the link bandwidth available. This is expressed in terms of bytes/cycle or the flit size
for(int ctr = 0; ctr < dest_nodes.size(); ctr++) // loop because we will be converting all multicast messages into unicast messages
{
@@ -236,7 +236,7 @@ void NetworkInterface::wakeup()
DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id);
DEBUG_MSG(NETWORK_COMP, HighPrio, "Message got delivered");
DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
- if(!NetworkConfig::isNetworkTesting()) // When we are doing network only testing, the messages do not have to be buffered into the message buffers
+ if(!m_net_ptr->getNetworkConfig()->isNetworkTesting()) // When we are doing network only testing, the messages do not have to be buffered into the message buffers
{
outNode_ptr[t_flit->get_vnet()]->enqueue(t_flit->get_msg_ptr(), 1); // enqueueing for protocol buffer. This is not required when doing network only testing
}
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc
index 90177cbec..ddc92d44c 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkLink.cc
@@ -46,8 +46,8 @@ NetworkLink::NetworkLink(int id, int latency, GarnetNetwork *net_ptr)
m_link_utilized = 0;
m_net_ptr = net_ptr;
m_latency = latency;
- int num_net = NUMBER_OF_VIRTUAL_NETWORKS;
- int num_vc = NetworkConfig::getVCsPerClass();
+ int num_net = net_ptr->getNumberOfVirtualNetworks();
+ int num_vc = m_net_ptr->getNetworkConfig()->getVCsPerClass();
m_vc_load.setSize(num_net*num_vc);
for(int i = 0; i < num_net*num_vc; i++)
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc b/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc
index 1def6f9c3..ea32e938d 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/Router.cc
@@ -43,8 +43,8 @@ Router::Router(int id, GarnetNetwork *network_ptr)
{
m_id = id;
m_net_ptr = network_ptr;
- m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS;
- m_vc_per_vnet = NetworkConfig::getVCsPerClass();
+ m_virtual_networks = m_net_ptr->getNumberOfVirtualNetworks();
+ m_vc_per_vnet = m_net_ptr->getNetworkConfig()->getVCsPerClass();
m_round_robin_inport = 0;
m_round_robin_start = 0;
m_num_vcs = m_vc_per_vnet*m_virtual_networks;
@@ -103,7 +103,7 @@ void Router::addOutPort(NetworkLink *out_link, const NetDest& routing_table_entr
Vector<flitBuffer *> intermediateQueues;
for(int i = 0; i < m_num_vcs; i++)
{
- intermediateQueues.insertAtBottom(new flitBuffer(NetworkConfig::getBufferSize()));
+ intermediateQueues.insertAtBottom(new flitBuffer(m_net_ptr->getNetworkConfig()->getBufferSize()));
}
m_router_buffers.insertAtBottom(intermediateQueues);
@@ -246,17 +246,17 @@ void Router::routeCompute(flit *m_flit, int inport)
int outport = m_in_vc_state[inport][invc]->get_outport();
int outvc = m_in_vc_state[inport][invc]->get_outvc();
- assert(NetworkConfig::getNumPipeStages() >= 1);
- m_flit->set_time(g_eventQueue_ptr->getTime() + (NetworkConfig::getNumPipeStages() - 1)); // Becasuse 1 cycle will be consumed in scheduling the output link
+ assert(m_net_ptr->getNetworkConfig()->getNumPipeStages() >= 1);
+ m_flit->set_time(g_eventQueue_ptr->getTime() + (m_net_ptr->getNetworkConfig()->getNumPipeStages() - 1)); // Because 1 cycle will be consumed in scheduling the output link
m_flit->set_vc(outvc);
m_router_buffers[outport][outvc]->insert(m_flit);
- if(NetworkConfig::getNumPipeStages() > 1)
- g_eventQueue_ptr->scheduleEvent(this, NetworkConfig::getNumPipeStages() -1 );
+ if(m_net_ptr->getNetworkConfig()->getNumPipeStages() > 1)
+ g_eventQueue_ptr->scheduleEvent(this, m_net_ptr->getNetworkConfig()->getNumPipeStages() -1 );
if((m_flit->get_type() == HEAD_) || (m_flit->get_type() == HEAD_TAIL_))
{
NetDest destination = dynamic_cast<NetworkMessage*>(m_flit->get_msg_ptr().ref())->getInternalDestination();
- if(NetworkConfig::getNumPipeStages() > 1)
+ if(m_net_ptr->getNetworkConfig()->getNumPipeStages() > 1)
{
m_out_vc_state[outport][outvc]->setState(VC_AB_, g_eventQueue_ptr->getTime() + 1);
m_out_link[outport]->request_vc_link(outvc, destination, g_eventQueue_ptr->getTime() + 1);
diff --git a/src/mem/ruby/network/simple/CustomTopology.cc b/src/mem/ruby/network/simple/CustomTopology.cc
new file mode 100644
index 000000000..829086f31
--- /dev/null
+++ b/src/mem/ruby/network/simple/CustomTopology.cc
@@ -0,0 +1,140 @@
+
+#include "mem/ruby/network/simple/CustomTopology.hh"
+#include "mem/protocol/MachineType.hh"
+
+static const int INFINITE_LATENCY = 10000; // Yes, this is a big hack
+static const int DEFAULT_BW_MULTIPLIER = 1; // Just to be consistent with above :)
+
+// make a network as described by the networkFile
+void CustomTopology::construct()
+{
+
+ Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
+ Vector<int> latencies; // link latencies for each link extracted
+ Vector<int> bw_multis; // bw multipliers for each link extracted
+ Vector<int> weights; // link weights used to enforce e-cube deadlock-free routing
+ Vector< SwitchID > int_network_switches; // internal switches extracted from the file
+ Vector<bool> endpointConnectionExist; // used to ensure all endpoints are connected to the network
+
+ endpointConnectionExist.setSize(m_nodes);
+
+ // initialize endpoint check vector
+ for (int k = 0; k < endpointConnectionExist.size(); k++) {
+ endpointConnectionExist[k] = false;
+ }
+
+ stringstream networkFile( m_connections );
+
+ string line = "";
+
+ while (!networkFile.eof()) {
+
+ Vector < SwitchID > nodes;
+ nodes.setSize(2);
+ int latency = -1; // null latency
+ int weight = -1; // null weight
+ int bw_multiplier = DEFAULT_BW_MULTIPLIER; // default multiplier in case the network file doesn't define it
+ int i = 0; // node pair index
+ int varsFound = 0; // number of varsFound on the line
+ int internalNodes = 0; // used to determine if the link is between 2 internal nodes
+ std::getline(networkFile, line, '\n');
+ string varStr = string_split(line, ' ');
+
+ // parse the current line in the file
+ while (varStr != "") {
+ string label = string_split(varStr, ':');
+
+ // valid node labels
+ if (label == "ext_node" || label == "int_node") {
+ ASSERT(i < 2); // one link between 2 switches per line
+ varsFound++;
+ bool isNewIntSwitch = true;
+ if (label == "ext_node") { // input link to node
+ MachineType machine = string_to_MachineType(string_split(varStr, ':'));
+ string nodeStr = string_split(varStr, ':');
+ nodes[i] = MachineType_base_number(machine)
+ + atoi(nodeStr.c_str());
+
+ // in nodes should be numbered 0 to m_nodes-1
+ ASSERT(nodes[i] >= 0 && nodes[i] < m_nodes);
+ isNewIntSwitch = false;
+ endpointConnectionExist[nodes[i]] = true;
+ }
+ if (label == "int_node") { // interior node
+ nodes[i] = atoi((string_split(varStr, ':')).c_str())+m_nodes*2;
+ // in nodes should be numbered >= m_nodes*2
+ ASSERT(nodes[i] >= m_nodes*2);
+ for (int k = 0; k < int_network_switches.size(); k++) {
+ if (int_network_switches[k] == nodes[i]) {
+ isNewIntSwitch = false;
+ }
+ }
+ if (isNewIntSwitch) { // if internal switch
+ m_number_of_switches++;
+ int_network_switches.insertAtBottom(nodes[i]);
+ }
+ internalNodes++;
+ }
+ i++;
+ } else if (label == "link_latency") {
+ latency = atoi((string_split(varStr, ':')).c_str());
+ varsFound++;
+ } else if (label == "bw_multiplier") { // not necessary, defaults to DEFAULT_BW_MULTIPLIER
+ bw_multiplier = atoi((string_split(varStr, ':')).c_str());
+ } else if (label == "link_weight") { // not necessary, defaults to link_latency
+ weight = atoi((string_split(varStr, ':')).c_str());
+ } else {
+ cerr << "Error: Unexpected Identifier: " << label << endl;
+ exit(1);
+ }
+ varStr = string_split(line, ' ');
+ }
+ if (varsFound == 3) { // all three necessary link variables were found, so add the link
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ if (weight != -1) {
+ weights.insertAtBottom(weight);
+ } else {
+ weights.insertAtBottom(latency);
+ }
+ bw_multis.insertAtBottom(bw_multiplier);
+ Vector < SwitchID > otherDirectionNodes;
+ otherDirectionNodes.setSize(2);
+ otherDirectionNodes[0] = nodes[1];
+ if (internalNodes == 2) { // this is an internal link
+ otherDirectionNodes[1] = nodes[0];
+ } else {
+ otherDirectionNodes[1] = nodes[0]+m_nodes;
+ }
+ nodePairs.insertAtBottom(otherDirectionNodes);
+ latencies.insertAtBottom(latency);
+ if (weight != -1) {
+ weights.insertAtBottom(weight);
+ } else {
+ weights.insertAtBottom(latency);
+ }
+ bw_multis.insertAtBottom(bw_multiplier);
+ } else {
+ if (varsFound != 0) { // if this is not a valid link, then no vars should have been found
+ cerr << "Error in line: " << line << endl;
+ exit(1);
+ }
+ }
+ } // end of file
+
+ // make sure all endpoints are connected in the soon-to-be-created network
+ for (int k = 0; k < endpointConnectionExist.size(); k++) {
+ if (endpointConnectionExist[k] == false) {
+ cerr << "Error: Unconnected Endpoint: " << k << endl;
+ exit(1);
+ }
+ }
+
+ ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size() && latencies.size() == weights.size())
+ for (int k = 0; k < nodePairs.size(); k++) {
+ ASSERT(nodePairs[k].size() == 2);
+ addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k], weights[k]);
+ }
+
+ // networkFile.close();
+}
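
CustomTopology::construct above parses m_connections as newline-separated link descriptions, each a space-separated list of label:value tokens, where a line is accepted only if it names exactly two nodes plus a link_latency (bw_multiplier and link_weight are optional). A hypothetical fragment in that format, with machine names and indices chosen purely for illustration:

    ext_node:L1Cache:0 int_node:0 link_latency:1 link_weight:1
    ext_node:Directory:0 int_node:0 link_latency:1 bw_multiplier:10
    int_node:0 int_node:1 link_latency:4 link_weight:4
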
diff --git a/src/mem/ruby/network/simple/CustomTopology.hh b/src/mem/ruby/network/simple/CustomTopology.hh
new file mode 100644
index 000000000..6c44eb049
--- /dev/null
+++ b/src/mem/ruby/network/simple/CustomTopology.hh
@@ -0,0 +1,17 @@
+
+#ifndef CUSTOMTOPOLOGY_H
+#define CUSTOMTOPOLOGY_H
+
+#include "mem/ruby/network/simple/Topology.hh"
+
+class CustomTopology : public Topology
+{
+public:
+ CustomTopology(const string & name);
+ void init(const vector<string> & argv);
+
+protected:
+ void construct();
+};
+
+#endif
diff --git a/src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc b/src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc
new file mode 100644
index 000000000..c0190e789
--- /dev/null
+++ b/src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc
@@ -0,0 +1,66 @@
+
+#include "mem/ruby/network/simple/HierarchicalSwitchTopology.hh"
+
+// hierarchical switch topology
+void Topology::construct(int fan_out_degree)
+{
+ // Make a row of switches with only one input. This extra row makes
+ // sure the links out of the nodes have latency and limited
+ // bandwidth.
+
+ // number of inter-chip switches, i.e. the last row of switches
+ Vector<SwitchID> last_level;
+ for (int i=0; i<m_nodes; i++) {
+ SwitchID new_switch = newSwitchID(); // internal switch id #
+ addLink(i, new_switch, m_network_ptr->getLinkLatency());
+ last_level.insertAtBottom(new_switch);
+ }
+
+ // Create Hierarchical Switches
+
+ // start from the bottom level and work up to root
+ Vector<SwitchID> next_level;
+ while(last_level.size() > 1) {
+ for (int i=0; i<last_level.size(); i++) {
+ if ((i % fan_out_degree) == 0) {
+ next_level.insertAtBottom(newSwitchID());
+ }
+ // Add this link to the last switch we created
+ addLink(last_level[i], next_level[next_level.size()-1], m_network_ptr->getLinkLatency());
+ }
+
+ // Make the current level the last level to get ready for next
+ // iteration
+ last_level = next_level;
+ next_level.clear();
+ }
+
+ SwitchID root_switch = last_level[0];
+
+ Vector<SwitchID> out_level;
+ for (int i=0; i<m_nodes; i++) {
+ out_level.insertAtBottom(m_nodes+i);
+ }
+
+ // Build the down network from the endpoints to the root
+ while(out_level.size() != 1) {
+
+ // A level of switches
+ for (int i=0; i<out_level.size(); i++) {
+ if ((i % fan_out_degree) == 0) {
+ if (out_level.size() > fan_out_degree) {
+ next_level.insertAtBottom(newSwitchID());
+ } else {
+ next_level.insertAtBottom(root_switch);
+ }
+ }
+ // Add this link to the last switch we created
+ addLink(next_level[next_level.size()-1], out_level[i], m_network_ptr->getLinkLatency());
+ }
+
+ // Make the current level the last level to get ready for next
+ // iteration
+ out_level = next_level;
+ next_level.clear();
+ }
+}
diff --git a/src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh b/src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh
new file mode 100644
index 000000000..0c2c84ef8
--- /dev/null
+++ b/src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh
@@ -0,0 +1,17 @@
+
+#ifndef HIERARCHICALSWITCHTOPOLOGY_H
+#define HIERARCHICALSWITCHTOPOLOGY_H
+
+#include "mem/ruby/network/simple/Topology.hh"
+
+class HierarchicalSwitchTopology : public Topology
+{
+public:
+ HierarchicalSwitchTopology(const string & name);
+ void init(const vector<string> & argv);
+
+protected:
+ void construct();
+};
+
+#endif
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index b561b69e2..4fd9af3eb 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -55,7 +55,7 @@ bool operator<(const LinkOrder& l1, const LinkOrder& l2) {
PerfectSwitch::PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr)
{
- m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS; // FIXME - pass me as a parameter?
+ m_virtual_networks = network_ptr->getNumberOfVirtualNetworks();
m_switch_id = sid;
m_round_robin_start = 0;
m_network_ptr = network_ptr;
@@ -88,9 +88,9 @@ void PerfectSwitch::addOutPort(const Vector<MessageBuffer*>& out, const NetDest&
m_out.insertAtBottom(out);
m_routing_table.insertAtBottom(routing_table_entry);
- if (g_PRINT_TOPOLOGY) {
+ // if (RubyConfig::getPrintTopology()) {
m_out_link_vec.insertAtBottom(out);
- }
+ // }
}
void PerfectSwitch::clearRoutingTables()
@@ -134,6 +134,7 @@ void PerfectSwitch::wakeup()
int highest_prio_vnet = m_virtual_networks-1;
int lowest_prio_vnet = 0;
int decrementer = 1;
+ bool schedule_wakeup = false;
NetworkMessage* net_msg_ptr = NULL;
// invert priorities to avoid starvation seen in the component network
@@ -186,8 +187,9 @@ void PerfectSwitch::wakeup()
assert(m_link_order.size() == m_routing_table.size());
assert(m_link_order.size() == m_out.size());
-
- if (g_adaptive_routing) {
+//changed by SS
+// if (RubyConfig::getAdaptiveRouting()) {
+ if (m_network_ptr->getAdaptiveRouting()) {
if (m_network_ptr->isVNetOrdered(vnet)) {
// Don't adaptively route
for (int outlink=0; outlink<m_out.size(); outlink++) {
diff --git a/src/mem/ruby/network/simple/PtToPtTopology.cc b/src/mem/ruby/network/simple/PtToPtTopology.cc
new file mode 100644
index 000000000..9d178dbcc
--- /dev/null
+++ b/src/mem/ruby/network/simple/PtToPtTopology.cc
@@ -0,0 +1,82 @@
+
+#include "mem/protocol/MachineType.hh"
+#include "mem/ruby/network/simple/PtToPtTopology.hh"
+
+// one internal node per chip, point to point links between chips
+void PtToPtTopology::construct()
+{
+ Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
+ Vector<int> latencies; // link latencies for each link extracted
+ Vector<int> bw_multis; // bw multipliers for each link extracted
+
+ Vector < SwitchID > nodes;
+ nodes.setSize(2);
+
+ // number of inter-chip switches
+ int numberOfChipSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
+ // two switches per machine node grouping
+ // one intra-chip switch and one inter-chip switch per chip
+ for(int i=0; i<numberOfChipSwitches; i++){
+ SwitchID new_switch = newSwitchID();
+ new_switch = newSwitchID();
+ }
+
+ makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfChipSwitches);
+
+ // connect intra-chip switch to inter-chip switch
+ for (int chip = 0; chip < RubyConfig::getNumberOfChips(); chip++) {
+
+ int latency = m_network_ptr->getOnChipLinkLatency(); // internal link latency
+ int bw_multiplier = 10; // external link bw multiplier of the global bandwidth
+
+ nodes[0] = chip+m_nodes*2;
+ nodes[1] = chip+m_nodes*2+RubyConfig::getNumberOfChips();
+
+ // insert link
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ // opposite direction link
+ Vector < SwitchID > otherDirectionNodes;
+ otherDirectionNodes.setSize(2);
+ otherDirectionNodes[0] = nodes[1];
+ otherDirectionNodes[1] = nodes[0];
+ nodePairs.insertAtBottom(otherDirectionNodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+ }
+
+ // point-to-point network between chips
+ for (int chip = 0; chip < RubyConfig::getNumberOfChips(); chip++) {
+ for (int other_chip = chip+1; other_chip < RubyConfig::getNumberOfChips(); other_chip++) {
+
+ int latency = m_network_ptr->getOffChipLinkLatency(); // external link latency
+ int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
+
+ nodes[0] = chip+m_nodes*2+RubyConfig::getNumberOfChips();
+ nodes[1] = other_chip+m_nodes*2+RubyConfig::getNumberOfChips();
+
+ // insert link
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ // opposite direction link
+ Vector < SwitchID > otherDirectionNodes;
+ otherDirectionNodes.setSize(2);
+ otherDirectionNodes[0] = nodes[1];
+ otherDirectionNodes[1] = nodes[0];
+ nodePairs.insertAtBottom(otherDirectionNodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+ }
+ }
+
+ // add links
+ ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
+ for (int k = 0; k < nodePairs.size(); k++) {
+ ASSERT(nodePairs[k].size() == 2);
+ addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
+ }
+}
diff --git a/src/mem/ruby/network/simple/PtToPtTopology.hh b/src/mem/ruby/network/simple/PtToPtTopology.hh
new file mode 100644
index 000000000..f15fa5956
--- /dev/null
+++ b/src/mem/ruby/network/simple/PtToPtTopology.hh
@@ -0,0 +1,17 @@
+
+#ifndef PTTOPTTOPOLOGY_H
+#define PTTOPTTOPOLOGY_H
+
+#include "mem/ruby/network/simple/Topology.hh"
+
+class PtToPtTopology : public Topology
+{
+public:
+ PtToPtTopology(const string & name);
+ void init(const vector<string> & argv);
+
+protected:
+ void construct();
+};
+
+#endif
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.cc b/src/mem/ruby/network/simple/SimpleNetwork.cc
index 1d0567b6e..1a3b876bf 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.cc
+++ b/src/mem/ruby/network/simple/SimpleNetwork.cc
@@ -59,11 +59,55 @@ Network* Network::createNetwork(int nodes)
}
*/
+SimpleNetwork::SimpleNetwork(const string & name)
+ : Network(name)
+{
+ m_virtual_networks = 0;
+ m_topology_ptr = NULL;
+}
+
+void SimpleNetwork::init(const vector<string> & argv)
+{
+
+ Network::init(argv);
+
+ m_endpoint_switches.setSize(m_nodes);
+
+ m_in_use.setSize(m_virtual_networks);
+ m_ordered.setSize(m_virtual_networks);
+ for (int i = 0; i < m_virtual_networks; i++) {
+ m_in_use[i] = false;
+ m_ordered[i] = false;
+ }
+
+ // Allocate to and from queues
+ m_toNetQueues.setSize(m_nodes);
+ m_fromNetQueues.setSize(m_nodes);
+ for (int node = 0; node < m_nodes; node++) {
+ m_toNetQueues[node].setSize(m_virtual_networks);
+ m_fromNetQueues[node].setSize(m_virtual_networks);
+ for (int j = 0; j < m_virtual_networks; j++) {
+ cerr << "Creating new MessageBuffer for " << node << " " << j << endl;
+ m_toNetQueues[node][j] = new MessageBuffer;
+ m_fromNetQueues[node][j] = new MessageBuffer;
+ }
+ }
+
+ // Setup the network switches
+ // m_topology_ptr = new Topology(this, m_nodes);
+ m_topology_ptr->makeTopology();
+ int number_of_switches = m_topology_ptr->numSwitches();
+ for (int i=0; i<number_of_switches; i++) {
+ m_switch_ptr_vector.insertAtBottom(new Switch(i, this));
+ }
+ m_topology_ptr->createLinks(false); // false because this isn't a reconfiguration
+}
+/*
SimpleNetwork::SimpleNetwork(int nodes)
{
m_nodes = MachineType_base_number(MachineType_NUM);
- m_virtual_networks = NUMBER_OF_VIRTUAL_NETWORKS;
+ m_virtual_networks = RubyConfig::getNumberOfVirtualNetworks();
m_endpoint_switches.setSize(m_nodes);
m_in_use.setSize(m_virtual_networks);
@@ -93,7 +137,7 @@ SimpleNetwork::SimpleNetwork(int nodes)
}
m_topology_ptr->createLinks(false); // false because this isn't a reconfiguration
}
-
+*/
void SimpleNetwork::reset()
{
for (int node = 0; node < m_nodes; node++) {
@@ -154,8 +198,8 @@ void SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, const NetDest&
// allocate a buffer
MessageBuffer* buffer_ptr = new MessageBuffer;
buffer_ptr->setOrdering(true);
- if(FINITE_BUFFERING) {
- buffer_ptr->setSize(FINITE_BUFFER_SIZE);
+ if (m_buffer_size > 0) {
+ buffer_ptr->setSize(m_buffer_size);
}
queues.insertAtBottom(buffer_ptr);
// remember to deallocate it
@@ -225,7 +269,7 @@ void SimpleNetwork::printConfig(ostream& out) const
out << "Network Configuration" << endl;
out << "---------------------" << endl;
out << "network: SIMPLE_NETWORK" << endl;
- out << "topology: " << g_NETWORK_TOPOLOGY << endl;
+ out << "topology: " << m_topology_ptr->getName() << endl;
out << endl;
for (int i = 0; i < m_virtual_networks; i++) {
@@ -246,9 +290,7 @@ void SimpleNetwork::printConfig(ostream& out) const
m_switch_ptr_vector[i]->printConfig(out);
}
- if (g_PRINT_TOPOLOGY) {
- m_topology_ptr->printConfig(out);
- }
+ m_topology_ptr->printConfig(out);
}
void SimpleNetwork::print(ostream& out) const
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.hh b/src/mem/ruby/network/simple/SimpleNetwork.hh
index 39ee2c095..9ffd862d3 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.hh
+++ b/src/mem/ruby/network/simple/SimpleNetwork.hh
@@ -83,11 +83,14 @@ class Topology;
class SimpleNetwork : public Network {
public:
// Constructors
- SimpleNetwork(int nodes);
+ // SimpleNetwork(int nodes);
+ SimpleNetwork(const string & name);
// Destructor
~SimpleNetwork();
+ void init(const vector<string> & argv);
+
// Public Methods
void printStats(ostream& out) const;
void clearStats();
@@ -130,14 +133,11 @@ private:
Vector<Vector<MessageBuffer*> > m_toNetQueues;
Vector<Vector<MessageBuffer*> > m_fromNetQueues;
- int m_nodes;
- int m_virtual_networks;
Vector<bool> m_in_use;
Vector<bool> m_ordered;
Vector<Switch*> m_switch_ptr_vector;
Vector<MessageBuffer*> m_buffers_to_free;
Vector<Switch*> m_endpoint_switches;
- Topology* m_topology_ptr;
};
// Output operator declaration
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index 192520e85..e3420ddae 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -82,8 +82,9 @@ void Switch::addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routin
MessageBuffer* buffer_ptr = new MessageBuffer;
// Make these queues ordered
buffer_ptr->setOrdering(true);
- if(FINITE_BUFFERING) {
- buffer_ptr->setSize(FINITE_BUFFER_SIZE);
+ Network* net_ptr = RubySystem::getNetwork();
+ if(net_ptr->getBufferSize() > 0) {
+ buffer_ptr->setSize(net_ptr->getBufferSize());
}
intermediateBuffers.insertAtBottom(buffer_ptr);
m_buffers_to_free.insertAtBottom(buffer_ptr);
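
Both SimpleNetwork::makeInternalLink and Switch::addOutPort above drop the FINITE_BUFFERING/FINITE_BUFFER_SIZE globals in favor of a single buffer-size value, where a positive number caps the queue and anything else leaves it unbounded. A tiny sketch of that convention; MessageBuffer here is a stand-in with only the one setter.

    #include <cassert>

    // Stand-in for the real MessageBuffer: a max size of 0 means "unbounded".
    struct MessageBuffer {
        int m_max_size = 0;
        void setSize(int n) { m_max_size = n; }
    };

    // Mirrors the hunks above: only cap the queue when the configured size is positive.
    MessageBuffer* makeLinkBuffer(int configured_buffer_size) {
        MessageBuffer* buffer_ptr = new MessageBuffer;
        if (configured_buffer_size > 0)
            buffer_ptr->setSize(configured_buffer_size); // finite buffering
        return buffer_ptr;                               // otherwise left unbounded
    }

    int main() {
        MessageBuffer* finite   = makeLinkBuffer(4);
        MessageBuffer* infinite = makeLinkBuffer(0);
        assert(finite->m_max_size == 4 && infinite->m_max_size == 0);
        delete finite; delete infinite;
        return 0;
    }
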
diff --git a/src/mem/ruby/network/simple/Switch.hh b/src/mem/ruby/network/simple/Switch.hh
index 58bde05ea..193898928 100644
--- a/src/mem/ruby/network/simple/Switch.hh
+++ b/src/mem/ruby/network/simple/Switch.hh
@@ -52,6 +52,7 @@ class PerfectSwitch;
class NetDest;
class SimpleNetwork;
class Throttle;
+class Network;
class Switch {
public:
@@ -83,6 +84,7 @@ private:
// Data Members (m_ prefix)
PerfectSwitch* m_perfect_switch_ptr;
+ Network* m_network_ptr;
Vector<Throttle*> m_throttles;
Vector<MessageBuffer*> m_buffers_to_free;
SwitchID m_switch_id;
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index f91bbdb30..1cfe88987 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -103,9 +103,9 @@ void Throttle::addLinks(const Vector<MessageBuffer*>& in_vec, const Vector<Messa
}
}
- if (g_PRINT_TOPOLOGY) {
+ // if (RubyConfig::getPrintTopology()) {
m_out_link_vec.insertAtBottom(out_vec);
- }
+ // }
}
void Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
@@ -206,13 +206,8 @@ void Throttle::wakeup()
while ((current_time - m_last_bandwidth_sample) > ADJUST_INTERVAL) {
double utilization = m_bandwidth_since_sample/double(ADJUST_INTERVAL * getLinkBandwidth());
- if (utilization > g_bash_bandwidth_adaptive_threshold) {
- // Used more bandwidth
- m_bash_counter++;
- } else {
- // Used less bandwidth
- m_bash_counter--;
- }
+ // Used less bandwidth
+ m_bash_counter--;
// Make sure we don't overflow
m_bash_counter = min(HIGH_RANGE, m_bash_counter);
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
index 067c7af5f..7b6d04353 100644
--- a/src/mem/ruby/network/simple/Throttle.hh
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -46,7 +46,8 @@
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/network/Network.hh"
class MessageBuffer;
@@ -68,7 +69,7 @@ public:
void clearStats();
void printConfig(ostream& out) const;
double getUtilization() const; // The average utilization (a percent) since last clearStats()
- int getLinkBandwidth() const { return g_endpoint_bandwidth * m_link_bandwidth_multiplier; }
+ int getLinkBandwidth() const { return RubySystem::getNetwork()->getEndpointBandwidth() * m_link_bandwidth_multiplier; }
int getLatency() const { return m_link_latency; }
const Vector<Vector<int> >& getCounters() const { return m_message_counters; }
diff --git a/src/mem/ruby/network/simple/Topology.cc b/src/mem/ruby/network/simple/Topology.cc
index f887f2a04..742bfe3cd 100644
--- a/src/mem/ruby/network/simple/Topology.cc
+++ b/src/mem/ruby/network/simple/Topology.cc
@@ -40,10 +40,11 @@
#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/protocol/TopologyType.hh"
-#include "mem/ruby/config/RubyConfig.hh"
+//#include "mem/ruby/config/RubyConfig.hh"
#include "mem/gems_common/util.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/Protocol.hh"
+#include "mem/ruby/system/System.hh"
#include <string>
static const int INFINITE_LATENCY = 10000; // Yes, this is a big hack
@@ -57,359 +58,45 @@ static const int DEFAULT_BW_MULTIPLIER = 1; // Just to be consistent with above
// of the network.
// Helper functions based on chapter 29 of Cormen et al.
-static void extend_shortest_path(Matrix& current_dist, Matrix& latencies, Matrix& inter_switches);
+static Matrix extend_shortest_path(const Matrix& current_dist, Matrix& latencies, Matrix& inter_switches);
static Matrix shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches);
static bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final, const Matrix& weights, const Matrix& dist);
static NetDest shortest_path_to_node(SwitchID src, SwitchID next, const Matrix& weights, const Matrix& dist);
-
-Topology::Topology(Network* network_ptr, int number_of_nodes)
+Topology::Topology(const string & name)
+ : m_name(name)
{
- m_network_ptr = network_ptr;
- m_nodes = number_of_nodes;
+ m_network_ptr = NULL;
+ m_nodes = MachineType_base_number(MachineType_NUM);
m_number_of_switches = 0;
- init();
-}
-
-void Topology::init()
-{
- if (m_nodes == 1) {
- SwitchID id = newSwitchID();
- addLink(0, id, NETWORK_LINK_LATENCY);
- addLink(id, 1, NETWORK_LINK_LATENCY);
- return;
- }
-
- // topology-specific set-up
- TopologyType topology = string_to_TopologyType(g_NETWORK_TOPOLOGY);
- switch (topology) {
- case TopologyType_TORUS_2D:
- make2DTorus();
- break;
- case TopologyType_HIERARCHICAL_SWITCH:
- makeHierarchicalSwitch(FAN_OUT_DEGREE);
- break;
- case TopologyType_CROSSBAR:
- makeHierarchicalSwitch(1024);
- break;
- case TopologyType_PT_TO_PT:
- makePtToPt();
- break;
- case TopologyType_FILE_SPECIFIED:
- makeFileSpecified();
- break;
- default:
- ERROR_MSG("Unexpected typology type")
- }
-
- // initialize component latencies record
- m_component_latencies.setSize(0);
- m_component_inter_switches.setSize(0);
-}
-
-void Topology::makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChipSwitches)
-{
-
- Vector < SwitchID > nodes; // temporary buffer
- nodes.setSize(2);
-
- Vector<bool> endpointConnectionExist; // used to ensure all endpoints are connected to the network
- endpointConnectionExist.setSize(m_nodes);
- // initialize endpoint check vector
- for (int k = 0; k < endpointConnectionExist.size(); k++) {
- endpointConnectionExist[k] = false;
- }
-
- Vector<int> componentCount;
- componentCount.setSize(MachineType_NUM);
- for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
- componentCount[mType] = 0;
- }
-
- // components to/from network links
- for (int chip = 0; chip < RubyConfig::numberOfChips(); chip++) {
- for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
- for (int component = 0; component < MachineType_chip_count(mType, chip); component++) {
-
- int latency = -1;
- int bw_multiplier = -1; // internal link bw multiplier of the global bandwidth
- if (mType != MachineType_Directory) {
- latency = ON_CHIP_LINK_LATENCY; // internal link latency
- bw_multiplier = 10; // internal link bw multiplier of the global bandwidth
- } else {
- latency = NETWORK_LINK_LATENCY; // local memory latency
- bw_multiplier = 1; // local memory link bw multiplier of the global bandwidth
- }
- nodes[0] = MachineType_base_number(mType)+componentCount[mType];
- nodes[1] = chip+m_nodes*2; // this is the chip's internal switch id #
-
- // insert link
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- //bw_multis.insertAtBottom(bw_multiplier);
- bw_multis.insertAtBottom(componentCount[mType]+MachineType_base_number((MachineType)mType));
-
- // opposite direction link
- Vector < SwitchID > otherDirectionNodes;
- otherDirectionNodes.setSize(2);
- otherDirectionNodes[0] = nodes[1];
- otherDirectionNodes[1] = nodes[0]+m_nodes;
- nodePairs.insertAtBottom(otherDirectionNodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- assert(!endpointConnectionExist[nodes[0]]);
- endpointConnectionExist[nodes[0]] = true;
- componentCount[mType]++;
- }
- }
- }
-
- // make sure all enpoints are connected in the soon to be created network
- for (int k = 0; k < endpointConnectionExist.size(); k++) {
- if (endpointConnectionExist[k] == false) {
- cerr << "Error: Unconnected Endpoint: " << k << endl;
- exit(1);
- }
- }
-
- // secondary check to ensure we saw the correct machine counts
- for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
- assert(componentCount[mType] == MachineType_base_count((MachineType)mType));
- }
-
-}
-
-// 2D torus topology
-
-void Topology::make2DTorus()
-{
- Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
- Vector<int> latencies; // link latencies for each link extracted
- Vector<int> bw_multis; // bw multipliers for each link extracted
-
- Vector < SwitchID > nodes; // temporary buffer
- nodes.setSize(2);
-
- // number of inter-chip switches
- int numberOfTorusSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
- // one switch per machine node grouping
- Vector<SwitchID> torusSwitches;
- for(int i=0; i<numberOfTorusSwitches; i++){
- SwitchID new_switch = newSwitchID();
- torusSwitches.insertAtBottom(new_switch);
- }
-
- makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfTorusSwitches);
-
- int lengthOfSide = (int)sqrt((double)numberOfTorusSwitches);
-
- // Now connect the inter-chip torus links
-
- int latency = NETWORK_LINK_LATENCY; // external link latency
- int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
-
- for(int i=0; i<numberOfTorusSwitches; i++){
- nodes[0] = torusSwitches[i]; // current switch
-
- // left
- if(nodes[0]%lengthOfSide == 0){ // determine left neighbor
- nodes[1] = nodes[0] - 1 + lengthOfSide;
- } else {
- nodes[1] = nodes[0] - 1;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // right
- if((nodes[0] + 1)%lengthOfSide == 0){ // determine right neighbor
- nodes[1] = nodes[0] + 1 - lengthOfSide;
- } else {
- nodes[1] = nodes[0] + 1;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // top
- if(nodes[0] - lengthOfSide < 2*m_nodes){ // determine if node is on the top
- nodes[1] = nodes[0] - lengthOfSide + (lengthOfSide*lengthOfSide);
- } else {
- nodes[1] = nodes[0] - lengthOfSide;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // bottom
- if(nodes[0] + lengthOfSide >= 2*m_nodes+numberOfTorusSwitches){ // determine if node is on the bottom
- // sorin: bad bug if this is a > instead of a >=
- nodes[1] = nodes[0] + lengthOfSide - (lengthOfSide*lengthOfSide);
- } else {
- nodes[1] = nodes[0] + lengthOfSide;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- }
-
- // add links
- ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
- for (int k = 0; k < nodePairs.size(); k++) {
- ASSERT(nodePairs[k].size() == 2);
- addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
- }
-
}
-// hierarchical switch topology
-void Topology::makeHierarchicalSwitch(int fan_out_degree)
+void Topology::init(const vector<string> & argv)
{
- // Make a row of switches with only one input. This extra row makes
- // sure the links out of the nodes have latency and limited
- // bandwidth.
-
- // number of inter-chip switches, i.e. the last row of switches
- Vector<SwitchID> last_level;
- for (int i=0; i<m_nodes; i++) {
- SwitchID new_switch = newSwitchID(); // internal switch id #
- addLink(i, new_switch, NETWORK_LINK_LATENCY);
- last_level.insertAtBottom(new_switch);
- }
-
- // Create Hierarchical Switches
-
- // start from the bottom level and work up to root
- Vector<SwitchID> next_level;
- while(last_level.size() > 1) {
- for (int i=0; i<last_level.size(); i++) {
- if ((i % fan_out_degree) == 0) {
- next_level.insertAtBottom(newSwitchID());
- }
- // Add this link to the last switch we created
- addLink(last_level[i], next_level[next_level.size()-1], NETWORK_LINK_LATENCY);
- }
-
- // Make the current level the last level to get ready for next
- // iteration
- last_level = next_level;
- next_level.clear();
- }
-
- SwitchID root_switch = last_level[0];
-
- Vector<SwitchID> out_level;
- for (int i=0; i<m_nodes; i++) {
- out_level.insertAtBottom(m_nodes+i);
- }
-
- // Build the down network from the endpoints to the root
- while(out_level.size() != 1) {
-
- // A level of switches
- for (int i=0; i<out_level.size(); i++) {
- if ((i % fan_out_degree) == 0) {
- if (out_level.size() > fan_out_degree) {
- next_level.insertAtBottom(newSwitchID());
- } else {
- next_level.insertAtBottom(root_switch);
- }
- }
- // Add this link to the last switch we created
- addLink(next_level[next_level.size()-1], out_level[i], NETWORK_LINK_LATENCY);
+ for (size_t i=0; i<argv.size(); i+=2) {
+ if (argv[i] == "network")
+ m_network_ptr = RubySystem::getNetwork();
+ else if (argv[i] == "connections")
+ m_connections = argv[i+1];
+ else if (argv[i] == "print_config") {
+ m_print_config = string_to_bool(argv[i+1]);
+ cerr << "print config: " << m_print_config << endl;
}
-
- // Make the current level the last level to get ready for next
- // iteration
- out_level = next_level;
- next_level.clear();
}
+ assert(m_network_ptr != NULL);
}
-// one internal node per chip, point to point links between chips
-void Topology::makePtToPt()
+void Topology::makeTopology()
{
- Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
- Vector<int> latencies; // link latencies for each link extracted
- Vector<int> bw_multis; // bw multipliers for each link extracted
-
- Vector < SwitchID > nodes;
- nodes.setSize(2);
-
- // number of inter-chip switches
- int numberOfChipSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
- // two switches per machine node grouping
- // one intra-chip switch and one inter-chip switch per chip
- for(int i=0; i<numberOfChipSwitches; i++){
- SwitchID new_switch = newSwitchID();
- new_switch = newSwitchID();
- }
-
- makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfChipSwitches);
-
- // connect intra-chip switch to inter-chip switch
- for (int chip = 0; chip < RubyConfig::numberOfChips(); chip++) {
-
- int latency = ON_CHIP_LINK_LATENCY; // internal link latency
- int bw_multiplier = 10; // external link bw multiplier of the global bandwidth
-
- nodes[0] = chip+m_nodes*2;
- nodes[1] = chip+m_nodes*2+RubyConfig::numberOfChips();
-
- // insert link
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // opposite direction link
- Vector < SwitchID > otherDirectionNodes;
- otherDirectionNodes.setSize(2);
- otherDirectionNodes[0] = nodes[1];
- otherDirectionNodes[1] = nodes[0];
- nodePairs.insertAtBottom(otherDirectionNodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
- }
-
- // point-to-point network between chips
- for (int chip = 0; chip < RubyConfig::numberOfChips(); chip++) {
- for (int other_chip = chip+1; other_chip < RubyConfig::numberOfChips(); other_chip++) {
-
- int latency = NETWORK_LINK_LATENCY; // external link latency
- int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
-
- nodes[0] = chip+m_nodes*2+RubyConfig::numberOfChips();
- nodes[1] = other_chip+m_nodes*2+RubyConfig::numberOfChips();
-
- // insert link
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // opposite direction link
- Vector < SwitchID > otherDirectionNodes;
- otherDirectionNodes.setSize(2);
- otherDirectionNodes[0] = nodes[1];
- otherDirectionNodes[1] = nodes[0];
- nodePairs.insertAtBottom(otherDirectionNodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
- }
- }
-
- // add links
- ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
- for (int k = 0; k < nodePairs.size(); k++) {
- ASSERT(nodePairs[k].size() == 2);
- addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
+ /*
+ if (m_nodes == 1) {
+ SwitchID id = newSwitchID();
+ addLink(0, id, m_network_ptr->getOffChipLinkLatency());
+ addLink(id, 1, m_network_ptr->getOffChipLinkLatency());
+ return;
}
-}
-
-// make a network as described by the networkFile
-void Topology::makeFileSpecified()
-{
+ */
+ assert(m_nodes > 1);
Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
Vector<int> latencies; // link latencies for each link extracted
@@ -425,21 +112,7 @@ void Topology::makeFileSpecified()
endpointConnectionExist[k] = false;
}
- string filename = "network/simple/Network_Files/";
- filename = filename+g_CACHE_DESIGN
- +"_Procs-"+int_to_string(RubyConfig::numberOfProcessors())
- +"_ProcsPerChip-"+int_to_string(RubyConfig::numberOfProcsPerChip())
- +"_L2Banks-"+int_to_string(RubyConfig::numberOfL2Cache())
- +"_Memories-"+int_to_string(RubyConfig::numberOfMemories())
- +".txt";
-
- ifstream networkFile( filename.c_str() , ios::in);
- if (!networkFile.is_open()) {
- cerr << "Error: Could not open network file: " << filename << endl;
- cerr << "Probably no network file exists for " << RubyConfig::numberOfProcessors()
- << " processors and " << RubyConfig::numberOfProcsPerChip() << " procs per chip " << endl;
- exit(1);
- }
+ stringstream networkFile( m_connections );
string line = "";
@@ -468,14 +141,9 @@ void Topology::makeFileSpecified()
if (label == "ext_node") { // input link to node
MachineType machine = string_to_MachineType(string_split(varStr, ':'));
string nodeStr = string_split(varStr, ':');
- if (string_split(varStr, ':') == "bank") {
- nodes[i] = MachineType_base_number(machine)
- + atoi(nodeStr.c_str())
- + atoi((string_split(varStr, ':')).c_str())*RubyConfig::numberOfChips();
- } else {
- nodes[i] = MachineType_base_number(machine)
- + atoi(nodeStr.c_str());
- }
+ nodes[i] = MachineType_base_number(machine)
+ + atoi(nodeStr.c_str());
+
// in nodes should be numbered 0 to m_nodes-1
ASSERT(nodes[i] >= 0 && nodes[i] < m_nodes);
isNewIntSwitch = false;
@@ -504,16 +172,6 @@ void Topology::makeFileSpecified()
bw_multiplier = atoi((string_split(varStr, ':')).c_str());
} else if (label == "link_weight") { // not necessary, defaults to link_latency
weight = atoi((string_split(varStr, ':')).c_str());
- } else if (label == "processors") {
- ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfProcessors());
- } else if (label == "bw_unit") {
- ASSERT(atoi((string_split(varStr, ':')).c_str()) == g_endpoint_bandwidth);
- } else if (label == "procs_per_chip") {
- ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfProcsPerChip());
- } else if (label == "L2banks") {
- ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfL2Cache());
- } else if (label == "memories") {
- ASSERT(atoi((string_split(varStr, ':')).c_str()) == RubyConfig::numberOfMemories());
} else {
cerr << "Error: Unexpected Identifier: " << label << endl;
exit(1);
@@ -567,9 +225,12 @@ void Topology::makeFileSpecified()
addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k], weights[k]);
}
- networkFile.close();
+ // initialize component latencies record
+ m_component_latencies.setSize(0);
+ m_component_inter_switches.setSize(0);
}
+
void Topology::createLinks(bool isReconfiguration)
{
// Find maximum switchID
@@ -633,6 +294,82 @@ void Topology::createLinks(bool isReconfiguration)
}
}
}
+/*
+void Topology::makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChipSwitches)
+{
+
+ Vector < SwitchID > nodes; // temporary buffer
+ nodes.setSize(2);
+
+ Vector<bool> endpointConnectionExist; // used to ensure all endpoints are connected to the network
+ endpointConnectionExist.setSize(m_nodes);
+ // initialize endpoint check vector
+ for (int k = 0; k < endpointConnectionExist.size(); k++) {
+ endpointConnectionExist[k] = false;
+ }
+
+ Vector<int> componentCount;
+ componentCount.setSize(MachineType_NUM);
+ for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
+ componentCount[mType] = 0;
+ }
+
+ // components to/from network links
+ // TODO: drh5: bring back chips!!!
+ for (int chip = 0; chip < RubyConfig::getNumberOfChips(); chip++) {
+ for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
+ for (int component = 0; component < MachineType_base_count(mType); component++) {
+
+ int latency = -1;
+ int bw_multiplier = -1; // internal link bw multiplier of the global bandwidth
+ if (mType != MachineType_Directory) {
+ latency = RubyConfig::getOnChipLinkLatency(); // internal link latency
+ bw_multiplier = 10; // internal link bw multiplier of the global bandwidth
+ } else {
+ latency = RubyConfig::getNetworkLinkLatency(); // local memory latency
+ bw_multiplier = 1; // local memory link bw multiplier of the global bandwidth
+ }
+ nodes[0] = MachineType_base_number(mType)+componentCount[mType];
+ nodes[1] = chip+m_nodes*2; // this is the chip's internal switch id #
+
+ // insert link
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+ //bw_multis.insertAtBottom(componentCount[mType]+MachineType_base_number((MachineType)mType));
+
+ // opposite direction link
+ Vector < SwitchID > otherDirectionNodes;
+ otherDirectionNodes.setSize(2);
+ otherDirectionNodes[0] = nodes[1];
+ otherDirectionNodes[1] = nodes[0]+m_nodes;
+ nodePairs.insertAtBottom(otherDirectionNodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ assert(!endpointConnectionExist[nodes[0]]);
+ endpointConnectionExist[nodes[0]] = true;
+ componentCount[mType]++;
+ }
+ }
+ }
+
+ // make sure all endpoints are connected in the soon-to-be-created network
+ for (int k = 0; k < endpointConnectionExist.size(); k++) {
+ if (endpointConnectionExist[k] == false) {
+ cerr << "Error: Unconnected Endpoint: " << k << endl;
+ exit(1);
+ }
+ }
+
+ // secondary check to ensure we saw the correct machine counts
+ for (MachineType mType = MachineType_FIRST; mType < MachineType_NUM; ++mType) {
+ assert(componentCount[mType] == MachineType_base_count((MachineType)mType));
+ }
+
+}
+*/
+
SwitchID Topology::newSwitchID()
{
@@ -680,6 +417,8 @@ void Topology::makeLink(SwitchID src, SwitchID dest, const NetDest& routing_tabl
void Topology::printConfig(ostream& out) const
{
+ if (m_print_config == false) return;
+
assert(m_component_latencies.size() > 0);
out << "--- Begin Topology Print ---" << endl;
diff --git a/src/mem/ruby/network/simple/Topology.hh b/src/mem/ruby/network/simple/Topology.hh
index cb6c36f17..0f8cdff3b 100644
--- a/src/mem/ruby/network/simple/Topology.hh
+++ b/src/mem/ruby/network/simple/Topology.hh
@@ -59,21 +59,26 @@ typedef Vector < Vector <int> > Matrix;
class Topology {
public:
// Constructors
- Topology(Network* network_ptr, int number_of_nodes);
+ // Topology(Network* network_ptr, int number_of_nodes);
+ Topology(const string & name);
// Destructor
- ~Topology() {}
+ virtual ~Topology() {}
+
+ virtual void init(const vector<string> & argv);
// Public Methods
+ void makeTopology();
int numSwitches() const { return m_number_of_switches; }
void createLinks(bool isReconfiguration);
+ const string getName() { return m_name; }
void printStats(ostream& out) const {}
void clearStats() {}
void printConfig(ostream& out) const;
void print(ostream& out) const { out << "[Topology]"; }
-private:
+protected:
// Private Methods
void init();
SwitchID newSwitchID();
@@ -82,12 +87,7 @@ private:
void addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier, int link_weight);
void makeLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int weight, int bw_multiplier, bool isReconfiguration);
- void makeHierarchicalSwitch(int fan_out_degree);
- void make2DTorus();
- void makePtToPt();
- void makeFileSpecified();
-
- void makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChips);
+ // void makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChips);
string getDesignStr();
// Private copy constructor and assignment operator
@@ -95,7 +95,10 @@ private:
Topology& operator=(const Topology& obj);
// Data Members (m_ prefix)
+ string m_name;
+ bool m_print_config;
Network* m_network_ptr;
+ string m_connections;
NodeID m_nodes;
int m_number_of_switches;
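With the header reworked this way, concrete topologies are meant to be built by subclassing: a name-only constructor, a virtual init(argv) for flat key/value options, and the construction helpers moved to the protected section (makeTopology() presumably dispatches to a construct() hook, as the Torus2DTopology subclass further down suggests). A rough sketch of the intended usage, with a hypothetical RingTopology standing in for a real subclass:

    #include <string>
    #include <vector>
    #include "mem/ruby/network/simple/Topology.hh"

    // Hypothetical subclass; only the shape of the interface is taken from
    // the header above, the ring itself is not part of this change.
    class RingTopology : public Topology {
    public:
        RingTopology(const std::string& name) : Topology(name) {}
        void init(const std::vector<std::string>& argv) { Topology::init(argv); }
    protected:
        void construct();  // would call newSwitchID()/addLink() to form a ring
    };

    // Driving sequence implied by the new interface (sketch):
    //   RingTopology* topo = new RingTopology("ring");
    //   topo->init(argv);         // consume flat key/value options
    //   topo->makeTopology();     // build switches and links
    //   topo->createLinks(false); // wire them into the network
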
diff --git a/src/mem/ruby/network/simple/Torus2DTopology.cc b/src/mem/ruby/network/simple/Torus2DTopology.cc
new file mode 100644
index 000000000..e66c6dc0b
--- /dev/null
+++ b/src/mem/ruby/network/simple/Torus2DTopology.cc
@@ -0,0 +1,84 @@
+
+// 2D torus topology
+
+void Torus2DTopology::construct()
+{
+ Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
+ Vector<int> latencies; // link latencies for each link extracted
+ Vector<int> bw_multis; // bw multipliers for each link extracted
+
+ Vector < SwitchID > nodes; // temporary buffer
+ nodes.setSize(2);
+
+ // number of inter-chip switches
+ int numberOfTorusSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
+ // one switch per machine node grouping
+ Vector<SwitchID> torusSwitches;
+ for(int i=0; i<numberOfTorusSwitches; i++){
+ SwitchID new_switch = newSwitchID();
+ torusSwitches.insertAtBottom(new_switch);
+ }
+
+ makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfTorusSwitches);
+
+ int lengthOfSide = (int)sqrt((double)numberOfTorusSwitches);
+
+ // Now connect the inter-chip torus links
+
+ int latency = m_network_ptr->getLinkLatency(); // external link latency
+ int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
+
+ for(int i=0; i<numberOfTorusSwitches; i++){
+ nodes[0] = torusSwitches[i]; // current switch
+
+ // left
+ if(nodes[0]%lengthOfSide == 0){ // determine left neighbor
+ nodes[1] = nodes[0] - 1 + lengthOfSide;
+ } else {
+ nodes[1] = nodes[0] - 1;
+ }
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ // right
+ if((nodes[0] + 1)%lengthOfSide == 0){ // determine right neighbor
+ nodes[1] = nodes[0] + 1 - lengthOfSide;
+ } else {
+ nodes[1] = nodes[0] + 1;
+ }
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ // top
+ if(nodes[0] - lengthOfSide < 2*m_nodes){ // determine if node is on the top
+ nodes[1] = nodes[0] - lengthOfSide + (lengthOfSide*lengthOfSide);
+ } else {
+ nodes[1] = nodes[0] - lengthOfSide;
+ }
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ // bottom
+ if(nodes[0] + lengthOfSide >= 2*m_nodes+numberOfTorusSwitches){ // determine if node is on the bottom
+ // sorin: bad bug if this is a > instead of a >=
+ nodes[1] = nodes[0] + lengthOfSide - (lengthOfSide*lengthOfSide);
+ } else {
+ nodes[1] = nodes[0] + lengthOfSide;
+ }
+ nodePairs.insertAtBottom(nodes);
+ latencies.insertAtBottom(latency);
+ bw_multis.insertAtBottom(bw_multiplier);
+
+ }
+
+ // add links
+ ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
+ for (int k = 0; k < nodePairs.size(); k++) {
+ ASSERT(nodePairs[k].size() == 2);
+ addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
+ }
+
+}
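The edge tests above implement the torus wrap-around; the same four neighbors can also be written with modular arithmetic over the switch index. A minimal standalone sketch, assuming the torus switches are numbered row-major from a base offset (in the code above the base is the switch-ID block that starts after the 2*m_nodes endpoint IDs, and the left/right tests additionally assume that base is a multiple of the side length):

    // Neighbors of switch s on an L x L torus whose switches are numbered
    // row-major from 'base'. Sketch only; parameter names are illustrative.
    void torusNeighbors(int s, int base, int L,
                        int& left, int& right, int& up, int& down)
    {
        int i = s - base;
        int row = i / L;
        int col = i % L;
        left  = base + row * L + (col + L - 1) % L;
        right = base + row * L + (col + 1) % L;
        up    = base + ((row + L - 1) % L) * L + col;
        down  = base + ((row + 1) % L) * L + col;
    }

    // e.g. with base = 32 and L = 4, switch 32 (row 0, col 0) wraps to
    // left = 35 and up = 44, matching the explicit edge tests above.
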
diff --git a/src/mem/ruby/network/simple/Torus2DTopology.hh b/src/mem/ruby/network/simple/Torus2DTopology.hh
new file mode 100644
index 000000000..83a314e94
--- /dev/null
+++ b/src/mem/ruby/network/simple/Torus2DTopology.hh
@@ -0,0 +1,17 @@
+
+#ifndef TORUS2DTOPOLOGY_H
+#define TORUS2DTOPOLOGY_H
+
+#include "mem/ruby/network/simple/Topology.hh"
+
+class Torus2DTopology : public Topology
+{
+public:
+ Torus2DTopology(const string & name);
+ void init(const vector<string> & argv);
+
+protected:
+ void construct();
+};
+
+#endif
diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc
index 3f6b48956..9ff10dc90 100644
--- a/src/mem/ruby/profiler/AddressProfiler.cc
+++ b/src/mem/ruby/profiler/AddressProfiler.cc
@@ -67,9 +67,16 @@ AddressProfiler::~AddressProfiler()
delete m_persistentPredictionProfileMap;
}
+void AddressProfiler::setHotLines(bool hot_lines){
+ m_hot_lines = hot_lines;
+}
+void AddressProfiler::setAllInstructions(bool all_instructions){
+ m_all_instructions = all_instructions;
+}
+
void AddressProfiler::printStats(ostream& out) const
{
- if (PROFILE_HOT_LINES) {
+ if (m_hot_lines) {
out << endl;
out << "AddressProfiler Stats" << endl;
out << "---------------------" << endl;
@@ -97,7 +104,7 @@ void AddressProfiler::printStats(ostream& out) const
printSorted(out, m_programCounterAccessTrace, "pc_address");
}
- if (PROFILE_ALL_INSTRUCTIONS){
+ if (m_all_instructions){
out << endl;
out << "All Instructions Profile:" << endl;
out << "-------------------------" << endl;
@@ -189,7 +196,7 @@ void AddressProfiler::profileGetS(const Address& datablock, const Address& PC, c
void AddressProfiler::addTraceSample(Address data_addr, Address pc_addr, CacheRequestType type, AccessModeType access_mode, NodeID id, bool sharing_miss)
{
- if (PROFILE_HOT_LINES) {
+ if (m_all_instructions) {
if (sharing_miss) {
m_sharing_miss_counter++;
}
@@ -206,7 +213,7 @@ void AddressProfiler::addTraceSample(Address data_addr, Address pc_addr, CacheRe
lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).update(type, access_mode, id, sharing_miss);
}
- if (PROFILE_ALL_INSTRUCTIONS) {
+ if (m_all_instructions) {
// This code is used if the address profiler is an all-instructions profiler
// record program counter address trace info
lookupTraceForAddress(pc_addr, m_programCounterAccessTrace).update(type, access_mode, id, sharing_miss);
@@ -248,7 +255,7 @@ static void printSorted(ostream& out, const Map<Address, AccessTraceForAddress>*
}
out << "Total_entries_" << description << ": " << keys.size() << endl;
- if (PROFILE_ALL_INSTRUCTIONS)
+ if (g_system_ptr->getProfiler()->getAllInstructions())
out << "Total_Instructions_" << description << ": " << misses << endl;
else
out << "Total_data_misses_" << description << ": " << misses << endl;
@@ -263,8 +270,8 @@ static void printSorted(ostream& out, const Map<Address, AccessTraceForAddress>*
  // Allows us to track how many lines were touched by n processors
Vector<int64> m_touched_vec;
Vector<int64> m_touched_weighted_vec;
- m_touched_vec.setSize(RubyConfig::numberOfProcessors()+1);
- m_touched_weighted_vec.setSize(RubyConfig::numberOfProcessors()+1);
+ m_touched_vec.setSize(RubySystem::getNumberOfSequencers()+1);
+ m_touched_weighted_vec.setSize(RubySystem::getNumberOfSequencers()+1);
for (int i=0; i<m_touched_vec.size(); i++) {
m_touched_vec[i] = 0;
m_touched_weighted_vec[i] = 0;
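The change in this file replaces the compile-time PROFILE_HOT_LINES / PROFILE_ALL_INSTRUCTIONS switches with per-object flags that the owning Profiler pushes in after construction. A stripped-down sketch of the pattern (class and member names here are illustrative):

    #include <iostream>

    class StatsBlock {
    public:
        StatsBlock() : m_hot_lines(false), m_all_instructions(false) {}
        void setHotLines(bool v)        { m_hot_lines = v; }
        void setAllInstructions(bool v) { m_all_instructions = v; }

        // Output is gated on the runtime flags rather than on #defines.
        void printStats(std::ostream& out) const {
            if (m_hot_lines)        out << "hot-line histograms..." << std::endl;
            if (m_all_instructions) out << "per-instruction traces..." << std::endl;
        }
    private:
        bool m_hot_lines;
        bool m_all_instructions;
    };
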
diff --git a/src/mem/ruby/profiler/AddressProfiler.hh b/src/mem/ruby/profiler/AddressProfiler.hh
index a32b1184d..69bf1319a 100644
--- a/src/mem/ruby/profiler/AddressProfiler.hh
+++ b/src/mem/ruby/profiler/AddressProfiler.hh
@@ -69,6 +69,10 @@ public:
void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
void print(ostream& out) const;
+
+ //added by SS
+ void setHotLines(bool hot_lines);
+ void setAllInstructions(bool all_instructions);
private:
// Private Methods
@@ -90,6 +94,10 @@ private:
Histogram m_retryProfileHistoRead;
Histogram m_getx_sharing_histogram;
Histogram m_gets_sharing_histogram;
+//added by SS
+ bool m_hot_lines;
+ bool m_all_instructions;
+
};
// Output operator declaration
diff --git a/src/mem/ruby/profiler/Profiler.cc b/src/mem/ruby/profiler/Profiler.cc
index ddbaebd6b..3494104e1 100644
--- a/src/mem/ruby/profiler/Profiler.cc
+++ b/src/mem/ruby/profiler/Profiler.cc
@@ -58,7 +58,6 @@
#include "mem/ruby/network/Network.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/protocol/CacheMsg.hh"
-#include "mem/ruby/common/Driver.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/gems_common/util.hh"
#include "mem/gems_common/Map.hh"
@@ -73,27 +72,34 @@ extern std::ostream * debug_cout_ptr;
static double process_memory_total();
static double process_memory_resident();
-Profiler::Profiler()
+Profiler::Profiler(const string & name)
: m_conflicting_histogram(-1)
{
+ m_name = name;
m_requestProfileMap_ptr = new Map<string, int>;
m_L1D_cache_profiler_ptr = new CacheProfiler("L1D_cache");
m_L1I_cache_profiler_ptr = new CacheProfiler("L1I_cache");
m_L2_cache_profiler_ptr = new CacheProfiler("L2_cache");
+ m_inst_profiler_ptr = NULL;
+ m_address_profiler_ptr = NULL;
+
+/*
m_address_profiler_ptr = new AddressProfiler;
m_inst_profiler_ptr = NULL;
- if (PROFILE_ALL_INSTRUCTIONS) {
+ if (m_all_instructions) {
m_inst_profiler_ptr = new AddressProfiler;
}
-
+*/
m_conflicting_map_ptr = new Map<Address, Time>;
m_real_time_start_time = time(NULL); // Not reset in clearStats()
m_stats_period = 1000000; // Default
m_periodic_output_file_ptr = &cerr;
+//changed by SS
+/*
// for MemoryControl:
m_memReq = 0;
m_memBankBusy = 0;
@@ -116,8 +122,7 @@ Profiler::Profiler()
* RubyConfig::ranksPerDimm()
* RubyConfig::dimmsPerChannel();
m_memBankCount.setSize(totalBanks);
-
- clearStats();
+*/
}
Profiler::~Profiler()
@@ -133,32 +138,97 @@ Profiler::~Profiler()
delete m_conflicting_map_ptr;
}
+void Profiler::init(const vector<string> & argv, vector<string> memory_control_names)
+{
+ // added by SS
+ vector<string>::iterator it;
+ memory_control_profiler* mcp;
+ m_memory_control_names = memory_control_names;
+// printf ( "Here in Profiler::init \n");
+ for ( it=memory_control_names.begin() ; it < memory_control_names.end(); it++ ){
+// printf ( "Here in Profiler::init memory control name %s \n", (*it).c_str());
+ mcp = new memory_control_profiler;
+ mcp->m_memReq = 0;
+ mcp->m_memBankBusy = 0;
+ mcp->m_memBusBusy = 0;
+ mcp->m_memReadWriteBusy = 0;
+ mcp->m_memDataBusBusy = 0;
+ mcp->m_memTfawBusy = 0;
+ mcp->m_memRefresh = 0;
+ mcp->m_memRead = 0;
+ mcp->m_memWrite = 0;
+ mcp->m_memWaitCycles = 0;
+ mcp->m_memInputQ = 0;
+ mcp->m_memBankQ = 0;
+ mcp->m_memArbWait = 0;
+ mcp->m_memRandBusy = 0;
+ mcp->m_memNotOld = 0;
+
+ mcp->m_banks_per_rank = RubySystem::getMemoryControl((*it).c_str())->getBanksPerRank();
+ mcp->m_ranks_per_dimm = RubySystem::getMemoryControl((*it).c_str())->getRanksPerDimm();
+ mcp->m_dimms_per_channel = RubySystem::getMemoryControl((*it).c_str())->getDimmsPerChannel();
+
+ int totalBanks = mcp->m_banks_per_rank
+ * mcp->m_ranks_per_dimm
+ * mcp->m_dimms_per_channel;
+
+ mcp->m_memBankCount.setSize(totalBanks);
+
+ m_memory_control_profilers [(*it).c_str()] = mcp;
+ }
+
+ clearStats();
+ m_hot_lines = false;
+ m_all_instructions = false;
+
+ for (size_t i=0; i<argv.size(); i+=2) {
+ if ( argv[i] == "hot_lines") {
+ m_hot_lines = (argv[i+1]=="true");
+ } else if ( argv[i] == "all_instructions") {
+ m_all_instructions = (argv[i+1]=="true");
+ }else {
+ cerr << "WARNING: Profiler: Unkown configuration parameter: " << argv[i] << endl;
+ assert(false);
+ }
+ }
+
+ m_address_profiler_ptr = new AddressProfiler;
+ m_address_profiler_ptr -> setHotLines(m_hot_lines);
+ m_address_profiler_ptr -> setAllInstructions(m_all_instructions);
+
+ if (m_all_instructions) {
+ m_inst_profiler_ptr = new AddressProfiler;
+ m_inst_profiler_ptr -> setHotLines(m_hot_lines);
+ m_inst_profiler_ptr -> setAllInstructions(m_all_instructions);
+ }
+}
+
void Profiler::wakeup()
{
// FIXME - avoid the repeated code
Vector<integer_t> perProcInstructionCount;
- perProcInstructionCount.setSize(RubyConfig::numberOfProcessors());
+ perProcInstructionCount.setSize(RubySystem::getNumberOfSequencers());
Vector<integer_t> perProcCycleCount;
- perProcCycleCount.setSize(RubyConfig::numberOfProcessors());
+ perProcCycleCount.setSize(RubySystem::getNumberOfSequencers());
- for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
- perProcInstructionCount[i] = g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
- perProcCycleCount[i] = g_system_ptr->getDriver()->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
+ for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
+ perProcInstructionCount[i] = g_system_ptr->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
+ perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
integer_t total_misses = m_perProcTotalMisses.sum();
integer_t instruction_executed = perProcInstructionCount.sum();
- integer_t cycles_executed = perProcCycleCount.sum();
+ integer_t simics_cycles_executed = perProcCycleCount.sum();
integer_t transactions_started = m_perProcStartTransaction.sum();
integer_t transactions_ended = m_perProcEndTransaction.sum();
(*m_periodic_output_file_ptr) << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl;
(*m_periodic_output_file_ptr) << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
(*m_periodic_output_file_ptr) << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
- (*m_periodic_output_file_ptr) << "cycles_executed: " << cycles_executed << " " << perProcCycleCount << endl;
+ (*m_periodic_output_file_ptr) << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
(*m_periodic_output_file_ptr) << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
(*m_periodic_output_file_ptr) << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
(*m_periodic_output_file_ptr) << "L1TBE_usage: " << m_L1tbeProfile << endl;
@@ -172,7 +242,7 @@ void Profiler::wakeup()
*m_periodic_output_file_ptr << endl;
- if (PROFILE_ALL_INSTRUCTIONS) {
+ if (m_all_instructions) {
m_inst_profiler_ptr->printStats(*m_periodic_output_file_ptr);
}
@@ -277,18 +347,18 @@ void Profiler::printStats(ostream& out, bool short_stats)
Vector<double> perProcCyclesPerTrans;
Vector<double> perProcMissesPerTrans;
- perProcInstructionCount.setSize(RubyConfig::numberOfProcessors());
- perProcCycleCount.setSize(RubyConfig::numberOfProcessors());
- perProcCPI.setSize(RubyConfig::numberOfProcessors());
- perProcMissesPerInsn.setSize(RubyConfig::numberOfProcessors());
+ perProcInstructionCount.setSize(RubySystem::getNumberOfSequencers());
+ perProcCycleCount.setSize(RubySystem::getNumberOfSequencers());
+ perProcCPI.setSize(RubySystem::getNumberOfSequencers());
+ perProcMissesPerInsn.setSize(RubySystem::getNumberOfSequencers());
- perProcInsnPerTrans.setSize(RubyConfig::numberOfProcessors());
- perProcCyclesPerTrans.setSize(RubyConfig::numberOfProcessors());
- perProcMissesPerTrans.setSize(RubyConfig::numberOfProcessors());
+ perProcInsnPerTrans.setSize(RubySystem::getNumberOfSequencers());
+ perProcCyclesPerTrans.setSize(RubySystem::getNumberOfSequencers());
+ perProcMissesPerTrans.setSize(RubySystem::getNumberOfSequencers());
- for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
- perProcInstructionCount[i] = g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
- perProcCycleCount[i] = g_system_ptr->getDriver()->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
+ for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
+ perProcInstructionCount[i] = g_system_ptr->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
+ perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
perProcCPI[i] = double(ruby_cycles)/perProcInstructionCount[i];
perProcMissesPerInsn[i] = 1000.0 * (double(m_perProcTotalMisses[i]) / double(perProcInstructionCount[i]));
@@ -309,12 +379,12 @@ void Profiler::printStats(ostream& out, bool short_stats)
integer_t user_misses = m_perProcUserMisses.sum();
integer_t supervisor_misses = m_perProcSupervisorMisses.sum();
integer_t instruction_executed = perProcInstructionCount.sum();
- integer_t cycles_executed = perProcCycleCount.sum();
+ integer_t simics_cycles_executed = perProcCycleCount.sum();
integer_t transactions_started = m_perProcStartTransaction.sum();
integer_t transactions_ended = m_perProcEndTransaction.sum();
double instructions_per_transaction = (transactions_ended != 0) ? double(instruction_executed) / double(transactions_ended) : 0;
- double cycles_per_transaction = (transactions_ended != 0) ? (RubyConfig::numberOfProcessors() * double(ruby_cycles)) / double(transactions_ended) : 0;
+ double cycles_per_transaction = (transactions_ended != 0) ? (RubySystem::getNumberOfSequencers() * double(ruby_cycles)) / double(transactions_ended) : 0;
double misses_per_transaction = (transactions_ended != 0) ? double(total_misses) / double(transactions_ended) : 0;
out << "Total_misses: " << total_misses << endl;
@@ -323,8 +393,8 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "supervisor_misses: " << supervisor_misses << " " << m_perProcSupervisorMisses << endl;
out << endl;
out << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
- out << "cycles_executed: " << cycles_executed << " " << perProcCycleCount << endl;
- out << "cycles_per_instruction: " << (RubyConfig::numberOfProcessors()*double(ruby_cycles))/double(instruction_executed) << " " << perProcCPI << endl;
+ out << "ruby_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
+ out << "cycles_per_instruction: " << (RubySystem::getNumberOfSequencers()*double(ruby_cycles))/double(instruction_executed) << " " << perProcCPI << endl;
out << "misses_per_thousand_instructions: " << 1000.0 * (double(total_misses) / double(instruction_executed)) << " " << perProcMissesPerInsn << endl;
out << endl;
out << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
@@ -341,44 +411,64 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << endl;
- if (m_memReq || m_memRefresh) { // if there's a memory controller at all
- long long int total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles;
- double stallsPerReq = total_stalls * 1.0 / m_memReq;
- out << "Memory control:" << endl;
- out << " memory_total_requests: " << m_memReq << endl; // does not include refreshes
- out << " memory_reads: " << m_memRead << endl;
- out << " memory_writes: " << m_memWrite << endl;
- out << " memory_refreshes: " << m_memRefresh << endl;
- out << " memory_total_request_delays: " << total_stalls << endl;
- out << " memory_delays_per_request: " << stallsPerReq << endl;
- out << " memory_delays_in_input_queue: " << m_memInputQ << endl;
- out << " memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl;
- out << " memory_delays_stalled_at_head_of_bank_queue: " << m_memWaitCycles << endl;
- // Note: The following "memory stalls" entries are a breakdown of the
- // cycles which already showed up in m_memWaitCycles. The order is
- // significant; it is the priority of attributing the cycles.
- // For example, bank_busy is before arbitration because if the bank was
- // busy, we didn't even check arbitration.
- // Note: "not old enough" means that since we grouped waiting heads-of-queues
- // into batches to avoid starvation, a request in a newer batch
- // didn't try to arbitrate yet because there are older requests waiting.
- out << " memory_stalls_for_bank_busy: " << m_memBankBusy << endl;
- out << " memory_stalls_for_random_busy: " << m_memRandBusy << endl;
- out << " memory_stalls_for_anti_starvation: " << m_memNotOld << endl;
- out << " memory_stalls_for_arbitration: " << m_memArbWait << endl;
- out << " memory_stalls_for_bus: " << m_memBusBusy << endl;
- out << " memory_stalls_for_tfaw: " << m_memTfawBusy << endl;
- out << " memory_stalls_for_read_write_turnaround: " << m_memReadWriteBusy << endl;
- out << " memory_stalls_for_read_read_turnaround: " << m_memDataBusBusy << endl;
- out << " accesses_per_bank: ";
- for (int bank=0; bank < m_memBankCount.size(); bank++) {
- out << m_memBankCount[bank] << " ";
- //if ((bank % 8) == 7) out << " " << endl;
+ vector<string>::iterator it;
+
+ for ( it=m_memory_control_names.begin() ; it < m_memory_control_names.end(); it++ ){
+ long long int m_memReq = m_memory_control_profilers[(*it).c_str()] -> m_memReq;
+ long long int m_memRefresh = m_memory_control_profilers[(*it).c_str()] -> m_memRefresh;
+ long long int m_memInputQ = m_memory_control_profilers[(*it).c_str()] -> m_memInputQ;
+ long long int m_memBankQ = m_memory_control_profilers[(*it).c_str()] -> m_memBankQ;
+ long long int m_memWaitCycles = m_memory_control_profilers[(*it).c_str()] -> m_memWaitCycles;
+ long long int m_memRead = m_memory_control_profilers[(*it).c_str()] -> m_memRead;
+ long long int m_memWrite = m_memory_control_profilers[(*it).c_str()] -> m_memWrite;
+ long long int m_memBankBusy = m_memory_control_profilers[(*it).c_str()] -> m_memBankBusy;
+ long long int m_memRandBusy = m_memory_control_profilers[(*it).c_str()] -> m_memRandBusy;
+ long long int m_memNotOld = m_memory_control_profilers[(*it).c_str()] -> m_memNotOld;
+ long long int m_memArbWait = m_memory_control_profilers[(*it).c_str()] -> m_memArbWait;
+ long long int m_memBusBusy = m_memory_control_profilers[(*it).c_str()] -> m_memBusBusy;
+ long long int m_memTfawBusy = m_memory_control_profilers[(*it).c_str()] -> m_memTfawBusy;
+ long long int m_memReadWriteBusy = m_memory_control_profilers[(*it).c_str()] -> m_memReadWriteBusy;
+ long long int m_memDataBusBusy = m_memory_control_profilers[(*it).c_str()] -> m_memDataBusBusy;
+ Vector<long long int> m_memBankCount = m_memory_control_profilers[(*it).c_str()] -> m_memBankCount;
+
+ if (m_memReq || m_memRefresh) { // if there's a memory controller at all
+ long long int total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles;
+ double stallsPerReq = total_stalls * 1.0 / m_memReq;
+ out << "Memory control:" << endl;
+ out << " memory_total_requests: " << m_memReq << endl; // does not include refreshes
+ out << " memory_reads: " << m_memRead << endl;
+ out << " memory_writes: " << m_memWrite << endl;
+ out << " memory_refreshes: " << m_memRefresh << endl;
+ out << " memory_total_request_delays: " << total_stalls << endl;
+ out << " memory_delays_per_request: " << stallsPerReq << endl;
+ out << " memory_delays_in_input_queue: " << m_memInputQ << endl;
+ out << " memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl;
+ out << " memory_delays_stalled_at_head_of_bank_queue: " << m_memWaitCycles << endl;
+ // Note: The following "memory stalls" entries are a breakdown of the
+ // cycles which already showed up in m_memWaitCycles. The order is
+ // significant; it is the priority of attributing the cycles.
+ // For example, bank_busy is before arbitration because if the bank was
+ // busy, we didn't even check arbitration.
+ // Note: "not old enough" means that since we grouped waiting heads-of-queues
+ // into batches to avoid starvation, a request in a newer batch
+ // didn't try to arbitrate yet because there are older requests waiting.
+ out << " memory_stalls_for_bank_busy: " << m_memBankBusy << endl;
+ out << " memory_stalls_for_random_busy: " << m_memRandBusy << endl;
+ out << " memory_stalls_for_anti_starvation: " << m_memNotOld << endl;
+ out << " memory_stalls_for_arbitration: " << m_memArbWait << endl;
+ out << " memory_stalls_for_bus: " << m_memBusBusy << endl;
+ out << " memory_stalls_for_tfaw: " << m_memTfawBusy << endl;
+ out << " memory_stalls_for_read_write_turnaround: " << m_memReadWriteBusy << endl;
+ out << " memory_stalls_for_read_read_turnaround: " << m_memDataBusBusy << endl;
+ out << " accesses_per_bank: ";
+ for (int bank=0; bank < m_memBankCount.size(); bank++) {
+ out << m_memBankCount[bank] << " ";
+ //if ((bank % 8) == 7) out << " " << endl;
+ }
+ out << endl;
+ out << endl;
}
- out << endl;
- out << endl;
}
-
if (!short_stats) {
out << "Busy Controller Counts:" << endl;
for(int i=0; i < MachineType_NUM; i++) {
@@ -413,7 +503,7 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "miss_latency: " << m_allMissLatencyHistogram << endl;
for(int i=0; i<m_missLatencyHistograms.size(); i++) {
if (m_missLatencyHistograms[i].size() > 0) {
- out << "miss_latency_" << CacheRequestType(i) << ": " << m_missLatencyHistograms[i] << endl;
+ out << "miss_latency_" << RubyRequestType(i) << ": " << m_missLatencyHistograms[i] << endl;
}
}
for(int i=0; i<m_machLatencyHistograms.size(); i++) {
@@ -500,11 +590,11 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "filter_action: " << m_filter_action_histogram << endl;
- if (!PROFILE_ALL_INSTRUCTIONS) {
+ if (!m_all_instructions) {
m_address_profiler_ptr->printStats(out);
}
- if (PROFILE_ALL_INSTRUCTIONS) {
+ if (m_all_instructions) {
m_inst_profiler_ptr->printStats(out);
}
@@ -550,25 +640,25 @@ void Profiler::clearStats()
m_ruby_start = g_eventQueue_ptr->getTime();
- m_instructions_executed_at_start.setSize(RubyConfig::numberOfProcessors());
- m_cycles_executed_at_start.setSize(RubyConfig::numberOfProcessors());
- for (int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+ m_instructions_executed_at_start.setSize(RubySystem::getNumberOfSequencers());
+ m_cycles_executed_at_start.setSize(RubySystem::getNumberOfSequencers());
+ for (int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
if (g_system_ptr == NULL) {
m_instructions_executed_at_start[i] = 0;
m_cycles_executed_at_start[i] = 0;
} else {
- m_instructions_executed_at_start[i] = g_system_ptr->getDriver()->getInstructionCount(i);
- m_cycles_executed_at_start[i] = g_system_ptr->getDriver()->getCycleCount(i);
+ m_instructions_executed_at_start[i] = g_system_ptr->getInstructionCount(i);
+ m_cycles_executed_at_start[i] = g_system_ptr->getCycleCount(i);
}
}
- m_perProcTotalMisses.setSize(RubyConfig::numberOfProcessors());
- m_perProcUserMisses.setSize(RubyConfig::numberOfProcessors());
- m_perProcSupervisorMisses.setSize(RubyConfig::numberOfProcessors());
- m_perProcStartTransaction.setSize(RubyConfig::numberOfProcessors());
- m_perProcEndTransaction.setSize(RubyConfig::numberOfProcessors());
+ m_perProcTotalMisses.setSize(RubySystem::getNumberOfSequencers());
+ m_perProcUserMisses.setSize(RubySystem::getNumberOfSequencers());
+ m_perProcSupervisorMisses.setSize(RubySystem::getNumberOfSequencers());
+ m_perProcStartTransaction.setSize(RubySystem::getNumberOfSequencers());
+ m_perProcEndTransaction.setSize(RubySystem::getNumberOfSequencers());
- for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
+ for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
m_perProcTotalMisses[i] = 0;
m_perProcUserMisses[i] = 0;
m_perProcSupervisorMisses[i] = 0;
@@ -587,8 +677,8 @@ void Profiler::clearStats()
m_delayedCyclesHistogram.clear();
m_delayedCyclesNonPFHistogram.clear();
- m_delayedCyclesVCHistograms.setSize(NUMBER_OF_VIRTUAL_NETWORKS);
- for (int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++) {
+ m_delayedCyclesVCHistograms.setSize(RubySystem::getNetwork()->getNumberOfVirtualNetworks());
+ for (int i = 0; i < RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++) {
m_delayedCyclesVCHistograms[i].clear();
}
@@ -656,6 +746,7 @@ void Profiler::clearStats()
m_L2_cache_profiler_ptr->clearStats();
// for MemoryControl:
+/*
m_memReq = 0;
m_memBankBusy = 0;
m_memBusBusy = 0;
@@ -675,7 +766,31 @@ void Profiler::clearStats()
for (int bank=0; bank < m_memBankCount.size(); bank++) {
m_memBankCount[bank] = 0;
}
-
+*/
+//added by SS
+ vector<string>::iterator it;
+
+ for ( it=m_memory_control_names.begin() ; it < m_memory_control_names.end(); it++ ){
+ m_memory_control_profilers[(*it).c_str()] -> m_memReq = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memBankBusy = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memBusBusy = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memTfawBusy = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memReadWriteBusy = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memDataBusBusy = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memRefresh = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memRead = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memWrite = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memWaitCycles = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memInputQ = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memBankQ = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memArbWait = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memRandBusy = 0;
+ m_memory_control_profilers[(*it).c_str()] -> m_memNotOld = 0;
+
+ for (int bank=0; bank < m_memory_control_profilers[(*it).c_str()] -> m_memBankCount.size(); bank++) {
+ m_memory_control_profilers[(*it).c_str()] -> m_memBankCount[bank] = 0;
+ }
+ }
// Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared
//g_eventQueue_ptr->triggerAllEvents();
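The reset loop added to clearStats() above walks m_memory_control_names and zeroes every counter through the per-controller map. A compact sketch of the same idea; iterating the map directly would also work and avoids keeping the name vector in sync (struct and member names below are reduced for illustration):

    #include <map>
    #include <string>

    struct MemCtrlStats {
        long long reads, writes, refreshes;
        MemCtrlStats() : reads(0), writes(0), refreshes(0) {}
    };

    // Reset every registered controller's counters in one pass over the map.
    void clearMemCtrlStats(std::map<std::string, MemCtrlStats*>& profilers)
    {
        for (std::map<std::string, MemCtrlStats*>::iterator it = profilers.begin();
             it != profilers.end(); ++it) {
            *(it->second) = MemCtrlStats();
        }
    }
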
@@ -707,7 +822,7 @@ void Profiler::profileConflictingRequests(const Address& addr)
assert(addr == line_address(addr));
Time last_time = m_ruby_start;
if (m_conflicting_map_ptr->exist(addr)) {
- last_time = m_conflicting_map_ptr->lookup(addr);
+ Time last_time = m_conflicting_map_ptr->lookup(addr);
}
Time current_time = g_eventQueue_ptr->getTime();
assert (current_time - last_time > 0);
@@ -755,8 +870,8 @@ void Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
// Note: The following line should be commented out if you want to
// use the special profiling that is part of the GS320 protocol
- // NOTE: Unless PROFILE_HOT_LINES or PROFILE_ALL_INSTRUCTIONS are enabled, nothing will be profiled by the AddressProfiler
- m_address_profiler_ptr->addTraceSample(msg.getAddress(), msg.getProgramCounter(), msg.getType(), msg.getAccessMode(), id, false);
+ // NOTE: Unless PROFILE_HOT_LINES or RubyConfig::getProfileAllInstructions() are enabled, nothing will be profiled by the AddressProfiler
+ m_address_profiler_ptr->addTraceSample(msg.getLineAddress(), msg.getProgramCounter(), msg.getType(), msg.getAccessMode(), id, false);
}
}
@@ -852,14 +967,16 @@ void Profiler::bankBusy()
}
// non-zero cycle demand request
-void Profiler::missLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
+void Profiler::missLatency(Time t, RubyRequestType type)
{
m_allMissLatencyHistogram.add(t);
m_missLatencyHistograms[type].add(t);
+ /*
m_machLatencyHistograms[respondingMach].add(t);
if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
m_L2MissLatencyHistogram.add(t);
}
+ */
}
// non-zero cycle prefetch request
@@ -873,7 +990,7 @@ void Profiler::swPrefetchLatency(Time t, CacheRequestType type, GenericMachineTy
}
}
-void Profiler::profileTransition(const string& component, NodeID id, NodeID version, Address addr,
+void Profiler::profileTransition(const string& component, NodeID version, Address addr,
const string& state, const string& event,
const string& next_state, const string& note)
{
@@ -887,22 +1004,16 @@ void Profiler::profileTransition(const string& component, NodeID id, NodeID vers
(g_eventQueue_ptr->getTime() >= g_debug_ptr->getDebugTime())) {
(* debug_cout_ptr).flags(ios::right);
(* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << id << " ";
(* debug_cout_ptr) << setw(ID_SPACES) << version << " ";
(* debug_cout_ptr) << setw(COMP_SPACES) << component;
(* debug_cout_ptr) << setw(EVENT_SPACES) << event << " ";
- for (int i=0; i < RubyConfig::numberOfProcessors(); i++) {
-
- if (i == id) {
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(STATE_SPACES) << state;
- (* debug_cout_ptr) << ">";
- (* debug_cout_ptr).flags(ios::left);
- (* debug_cout_ptr) << setw(STATE_SPACES) << next_state;
- } else {
- // cout << setw(STATE_SPACES) << " " << " " << setw(STATE_SPACES) << " ";
- }
- }
+
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(STATE_SPACES) << state;
+ (* debug_cout_ptr) << ">";
+ (* debug_cout_ptr).flags(ios::left);
+ (* debug_cout_ptr) << setw(STATE_SPACES) << next_state;
+
(* debug_cout_ptr) << " " << addr << " " << note;
(* debug_cout_ptr) << endl;
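The transition trace above is laid out with setw()/flags() column formatting. A minimal illustration of how those manipulators behave: the width applies only to the next insertion, so it is re-applied for every column, and ios::right / ios::left choose which side gets padded.

    #include <iomanip>
    #include <iostream>

    int main()
    {
        std::ostream& out = std::cout;
        out.flags(std::ios::right);
        out << std::setw(7) << 123 << " ";   // prints "    123 "
        out << std::setw(3) << 4   << " ";   // prints "  4 "
        out.flags(std::ios::left);
        out << std::setw(10) << "IM>M";      // prints "IM>M      " (padded on the right)
        out << std::endl;
        return 0;
    }
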
@@ -949,32 +1060,11 @@ void Profiler::profileTrainingMask(const Set& pred_set)
m_explicit_training_mask.add(pred_set.count());
}
-// For MemoryControl:
-void Profiler::profileMemReq(int bank) {
- m_memReq++;
- m_memBankCount[bank]++;
-}
-
-void Profiler::profileMemBankBusy() { m_memBankBusy++; }
-void Profiler::profileMemBusBusy() { m_memBusBusy++; }
-void Profiler::profileMemReadWriteBusy() { m_memReadWriteBusy++; }
-void Profiler::profileMemDataBusBusy() { m_memDataBusBusy++; }
-void Profiler::profileMemTfawBusy() { m_memTfawBusy++; }
-void Profiler::profileMemRefresh() { m_memRefresh++; }
-void Profiler::profileMemRead() { m_memRead++; }
-void Profiler::profileMemWrite() { m_memWrite++; }
-void Profiler::profileMemWaitCycles(int cycles) { m_memWaitCycles += cycles; }
-void Profiler::profileMemInputQ(int cycles) { m_memInputQ += cycles; }
-void Profiler::profileMemBankQ(int cycles) { m_memBankQ += cycles; }
-void Profiler::profileMemArbWait(int cycles) { m_memArbWait += cycles; }
-void Profiler::profileMemRandBusy() { m_memRandBusy++; }
-void Profiler::profileMemNotOld() { m_memNotOld++; }
-
int64 Profiler::getTotalInstructionsExecuted() const
{
int64 sum = 1; // Starting at 1 allows us to avoid division by zero
- for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
- sum += (g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i]);
+ for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
+ sum += (g_system_ptr->getInstructionCount(i) - m_instructions_executed_at_start[i]);
}
return sum;
}
@@ -1014,3 +1104,51 @@ GenericRequestType Profiler::CacheRequestType_to_GenericRequestType(const CacheR
}
}
+void Profiler::rubyWatch(int id){
+ int rn_g1 = 0;//SIMICS_get_register_number(id, "g1");
+ uint64 tr = 0;//SIMICS_read_register(id, rn_g1);
+ Address watch_address = Address(tr);
+ const int ID_SPACES = 3;
+ const int TIME_SPACES = 7;
+
+ (* debug_cout_ptr).flags(ios::right);
+ (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
+ (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
+ << "RUBY WATCH "
+ << watch_address
+ << endl;
+
+ if(!m_watch_address_list_ptr->exist(watch_address)){
+ m_watch_address_list_ptr->add(watch_address, 1);
+ }
+}
+
+bool Profiler::watchAddress(Address addr){
+ if (m_watch_address_list_ptr->exist(addr))
+ return true;
+ else
+ return false;
+}
+
+// For MemoryControl:
+void Profiler::profileMemReq(string name, int bank) {
+// printf("name is %s", name.c_str());
+ assert(m_memory_control_profilers.count(name) == 1);
+ m_memory_control_profilers[name] -> m_memReq++;
+ m_memory_control_profilers[name] -> m_memBankCount[bank]++;
+}
+void Profiler::profileMemBankBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memBankBusy++; }
+void Profiler::profileMemBusBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memBusBusy++; }
+void Profiler::profileMemReadWriteBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memReadWriteBusy++; }
+void Profiler::profileMemDataBusBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memDataBusBusy++; }
+void Profiler::profileMemTfawBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memTfawBusy++; }
+void Profiler::profileMemRefresh(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memRefresh++; }
+void Profiler::profileMemRead(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memRead++; }
+void Profiler::profileMemWrite(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memWrite++; }
+void Profiler::profileMemWaitCycles(string name, int cycles) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memWaitCycles += cycles; }
+void Profiler::profileMemInputQ(string name, int cycles) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memInputQ += cycles; }
+void Profiler::profileMemBankQ(string name, int cycles) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memBankQ += cycles; }
+void Profiler::profileMemArbWait(string name, int cycles) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memArbWait += cycles; }
+void Profiler::profileMemRandBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memRandBusy++; }
+void Profiler::profileMemNotOld(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memNotOld++; }
+
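rubyWatch()/watchAddress(), added a few lines above, keep a simple watch list so activity on a watched address can be flagged in the debug output. A minimal self-contained sketch of the same mechanism using a std::set (the real code stores the addresses in a Ruby Map keyed by Address):

    #include <iostream>
    #include <set>
    #include <stdint.h>

    typedef uint64_t Addr;

    class WatchList {
    public:
        void watch(Addr a) { m_watched.insert(a); }   // rubyWatch() adds an entry
        bool isWatched(Addr a) const {                // watchAddress() queries it
            return m_watched.count(a) != 0;
        }
    private:
        std::set<Addr> m_watched;
    };

    // usage sketch:
    //   WatchList wl;
    //   wl.watch(0x400080);
    //   if (wl.isWatched(0x400080)) std::cout << "RUBY WATCH" << std::endl;
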
diff --git a/src/mem/ruby/profiler/Profiler.hh b/src/mem/ruby/profiler/Profiler.hh
index e25105982..4731c7138 100644
--- a/src/mem/ruby/profiler/Profiler.hh
+++ b/src/mem/ruby/profiler/Profiler.hh
@@ -54,6 +54,8 @@
#ifndef PROFILER_H
#define PROFILER_H
+#include "mem/ruby/libruby.hh"
+
#include "mem/ruby/common/Global.hh"
#include "mem/protocol/GenericMachineType.hh"
#include "mem/ruby/config/RubyConfig.hh"
@@ -68,6 +70,7 @@
#include "mem/ruby/common/Set.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/GenericRequestType.hh"
+#include "mem/ruby/system/MemoryControl.hh"
class CacheMsg;
class CacheProfiler;
@@ -75,231 +78,236 @@ class AddressProfiler;
template <class KEY_TYPE, class VALUE_TYPE> class Map;
+struct memory_control_profiler {
+ long long int m_memReq;
+ long long int m_memBankBusy;
+ long long int m_memBusBusy;
+ long long int m_memTfawBusy;
+ long long int m_memReadWriteBusy;
+ long long int m_memDataBusBusy;
+ long long int m_memRefresh;
+ long long int m_memRead;
+ long long int m_memWrite;
+ long long int m_memWaitCycles;
+ long long int m_memInputQ;
+ long long int m_memBankQ;
+ long long int m_memArbWait;
+ long long int m_memRandBusy;
+ long long int m_memNotOld;
+ Vector<long long int> m_memBankCount;
+ int m_banks_per_rank;
+ int m_ranks_per_dimm;
+ int m_dimms_per_channel;
+};
+
+
class Profiler : public Consumer {
public:
- // Constructors
- Profiler();
-
- // Destructor
- ~Profiler();
-
- // Public Methods
- void wakeup();
-
- void setPeriodicStatsFile(const string& filename);
- void setPeriodicStatsInterval(integer_t period);
-
- void printStats(ostream& out, bool short_stats=false);
- void printShortStats(ostream& out) { printStats(out, true); }
- void printTraceStats(ostream& out) const;
- void clearStats();
- void printConfig(ostream& out) const;
- void printResourceUsage(ostream& out) const;
-
- AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
- AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
-
- void addPrimaryStatSample(const CacheMsg& msg, NodeID id);
- void addSecondaryStatSample(GenericRequestType requestType,
- AccessModeType type, int msgSize,
- PrefetchBit pfBit, NodeID id);
- void addSecondaryStatSample(CacheRequestType requestType,
- AccessModeType type, int msgSize,
- PrefetchBit pfBit, NodeID id);
- void addAddressTraceSample(const CacheMsg& msg, NodeID id);
-
- void profileRequest(const string& requestStr);
- void profileSharing(const Address& addr, AccessType type,
- NodeID requestor, const Set& sharers,
- const Set& owner);
-
- void profileMulticastRetry(const Address& addr, int count);
-
- void profileFilterAction(int action);
-
- void profileConflictingRequests(const Address& addr);
- void profileOutstandingRequest(int outstanding) {
- m_outstanding_requests.add(outstanding);
- }
-
- void profileOutstandingPersistentRequest(int outstanding) {
- m_outstanding_persistent_requests.add(outstanding);
- }
- void profileAverageLatencyEstimate(int latency) {
- m_average_latency_estimate.add(latency);
- }
-
- void countBAUnicast() { m_num_BA_unicasts++; }
- void countBABroadcast() { m_num_BA_broadcasts++; }
-
- void recordPrediction(bool wasGood, bool wasPredicted);
-
- void startTransaction(int cpu);
- void endTransaction(int cpu);
- void profilePFWait(Time waitTime);
-
- void controllerBusy(MachineID machID);
- void bankBusy();
- void missLatency(Time t, CacheRequestType type,
- GenericMachineType respondingMach);
- void swPrefetchLatency(Time t, CacheRequestType type,
- GenericMachineType respondingMach);
- void stopTableUsageSample(int num) { m_stopTableProfile.add(num); }
- void L1tbeUsageSample(int num) { m_L1tbeProfile.add(num); }
- void L2tbeUsageSample(int num) { m_L2tbeProfile.add(num); }
- void sequencerRequests(int num) { m_sequencer_requests.add(num); }
- void storeBuffer(int size, int blocks) {
- m_store_buffer_size.add(size);
- m_store_buffer_blocks.add(blocks);
- }
-
- void profileGetXMaskPrediction(const Set& pred_set);
- void profileGetSMaskPrediction(const Set& pred_set);
- void profileTrainingMask(const Set& pred_set);
- void profileTransition(const string& component, NodeID id, NodeID version,
- Address addr, const string& state,
- const string& event, const string& next_state,
- const string& note);
- void profileMsgDelay(int virtualNetwork, int delayCycles);
-
- void print(ostream& out) const;
-
- int64 getTotalInstructionsExecuted() const;
- int64 getTotalTransactionsExecuted() const;
-
- Time getRubyStartTime(){
- return m_ruby_start;
- }
-
- // added for MemoryControl:
- void profileMemReq(int bank);
- void profileMemBankBusy();
- void profileMemBusBusy();
- void profileMemTfawBusy();
- void profileMemReadWriteBusy();
- void profileMemDataBusBusy();
- void profileMemRefresh();
- void profileMemRead();
- void profileMemWrite();
- void profileMemWaitCycles(int cycles);
- void profileMemInputQ(int cycles);
- void profileMemBankQ(int cycles);
- void profileMemArbWait(int cycles);
- void profileMemRandBusy();
- void profileMemNotOld();
+ // Constructors
+ Profiler(const string & name);
+
+ void init(const vector<string> & argv, vector<string> memory_control_names);
+
+ // Destructor
+ ~Profiler();
+
+ // Public Methods
+ void wakeup();
+
+ void setPeriodicStatsFile(const string& filename);
+ void setPeriodicStatsInterval(integer_t period);
+
+ void printStats(ostream& out, bool short_stats=false);
+ void printShortStats(ostream& out) { printStats(out, true); }
+ void printTraceStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+ void printResourceUsage(ostream& out) const;
+
+ AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
+ AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
+
+ void addPrimaryStatSample(const CacheMsg& msg, NodeID id);
+ void addSecondaryStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
+ void addSecondaryStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
+ void addAddressTraceSample(const CacheMsg& msg, NodeID id);
+
+ void profileRequest(const string& requestStr);
+ void profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner);
+
+ void profileMulticastRetry(const Address& addr, int count);
+
+ void profileFilterAction(int action);
+
+ void profileConflictingRequests(const Address& addr);
+ void profileOutstandingRequest(int outstanding) { m_outstanding_requests.add(outstanding); }
+ void profileOutstandingPersistentRequest(int outstanding) { m_outstanding_persistent_requests.add(outstanding); }
+ void profileAverageLatencyEstimate(int latency) { m_average_latency_estimate.add(latency); }
+
+ void countBAUnicast() { m_num_BA_unicasts++; }
+ void countBABroadcast() { m_num_BA_broadcasts++; }
+
+ void recordPrediction(bool wasGood, bool wasPredicted);
+
+ void startTransaction(int cpu);
+ void endTransaction(int cpu);
+ void profilePFWait(Time waitTime);
+
+ void controllerBusy(MachineID machID);
+ void bankBusy();
+ void missLatency(Time t, RubyRequestType type);
+ void swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach);
+ void stopTableUsageSample(int num) { m_stopTableProfile.add(num); }
+ void L1tbeUsageSample(int num) { m_L1tbeProfile.add(num); }
+ void L2tbeUsageSample(int num) { m_L2tbeProfile.add(num); }
+ void sequencerRequests(int num) { m_sequencer_requests.add(num); }
+ void storeBuffer(int size, int blocks) { m_store_buffer_size.add(size); m_store_buffer_blocks.add(blocks);}
+
+ void profileGetXMaskPrediction(const Set& pred_set);
+ void profileGetSMaskPrediction(const Set& pred_set);
+ void profileTrainingMask(const Set& pred_set);
+ void profileTransition(const string& component, NodeID version, Address addr,
+ const string& state, const string& event,
+ const string& next_state, const string& note);
+ void profileMsgDelay(int virtualNetwork, int delayCycles);
+
+ void print(ostream& out) const;
+
+ int64 getTotalInstructionsExecuted() const;
+ int64 getTotalTransactionsExecuted() const;
+
+ void rubyWatch(int proc);
+ bool watchAddress(Address addr);
+
+ // return Ruby's start time
+ Time getRubyStartTime(){
+ return m_ruby_start;
+ }
+
+ // added for MemoryControl:
+ void profileMemReq(string name, int bank);
+ void profileMemBankBusy(string name);
+ void profileMemBusBusy(string name);
+ void profileMemTfawBusy(string name);
+ void profileMemReadWriteBusy(string name);
+ void profileMemDataBusBusy(string name);
+ void profileMemRefresh(string name);
+ void profileMemRead(string name);
+ void profileMemWrite(string name);
+ void profileMemWaitCycles(string name, int cycles);
+ void profileMemInputQ(string name, int cycles);
+ void profileMemBankQ(string name, int cycles);
+ void profileMemArbWait(string name, int cycles);
+ void profileMemRandBusy(string name);
+ void profileMemNotOld(string name);
+ //added by SS
+ bool getHotLines() { return m_hot_lines; }
+ bool getAllInstructions() { return m_all_instructions; }
private:
- // Private Methods
- void addL2StatSample(GenericRequestType requestType, AccessModeType type,
- int msgSize, PrefetchBit pfBit, NodeID id);
- void addL1DStatSample(const CacheMsg& msg, NodeID id);
- void addL1IStatSample(const CacheMsg& msg, NodeID id);
-
- GenericRequestType CacheRequestType_to_GenericRequestType(const CacheRequestType& type);
-
- // Private copy constructor and assignment operator
- Profiler(const Profiler& obj);
- Profiler& operator=(const Profiler& obj);
-
- // Data Members (m_ prefix)
- CacheProfiler* m_L1D_cache_profiler_ptr;
- CacheProfiler* m_L1I_cache_profiler_ptr;
- CacheProfiler* m_L2_cache_profiler_ptr;
- AddressProfiler* m_address_profiler_ptr;
- AddressProfiler* m_inst_profiler_ptr;
-
- Vector<int64> m_instructions_executed_at_start;
- Vector<int64> m_cycles_executed_at_start;
-
- ostream* m_periodic_output_file_ptr;
- integer_t m_stats_period;
-
- Time m_ruby_start;
- time_t m_real_time_start_time;
-
- int m_num_BA_unicasts;
- int m_num_BA_broadcasts;
-
- Vector<integer_t> m_perProcTotalMisses;
- Vector<integer_t> m_perProcUserMisses;
- Vector<integer_t> m_perProcSupervisorMisses;
- Vector<integer_t> m_perProcStartTransaction;
- Vector<integer_t> m_perProcEndTransaction;
- Vector < Vector < integer_t > > m_busyControllerCount;
- integer_t m_busyBankCount;
- Histogram m_multicast_retry_histogram;
-
- Histogram m_L1tbeProfile;
- Histogram m_L2tbeProfile;
- Histogram m_stopTableProfile;
-
- Histogram m_filter_action_histogram;
- Histogram m_tbeProfile;
-
- Histogram m_sequencer_requests;
- Histogram m_store_buffer_size;
- Histogram m_store_buffer_blocks;
- Histogram m_read_sharing_histogram;
- Histogram m_write_sharing_histogram;
- Histogram m_all_sharing_histogram;
- int64 m_cache_to_cache;
- int64 m_memory_to_cache;
-
- Histogram m_prefetchWaitHistogram;
-
- Vector<Histogram> m_missLatencyHistograms;
- Vector<Histogram> m_machLatencyHistograms;
- Histogram m_L2MissLatencyHistogram;
- Histogram m_allMissLatencyHistogram;
-
- Histogram m_allSWPrefetchLatencyHistogram;
- Histogram m_SWPrefetchL2MissLatencyHistogram;
- Vector<Histogram> m_SWPrefetchLatencyHistograms;
- Vector<Histogram> m_SWPrefetchMachLatencyHistograms;
-
- Histogram m_delayedCyclesHistogram;
- Histogram m_delayedCyclesNonPFHistogram;
- Vector<Histogram> m_delayedCyclesVCHistograms;
-
- int m_predictions;
- int m_predictionOpportunities;
- int m_goodPredictions;
-
- Histogram m_gets_mask_prediction;
- Histogram m_getx_mask_prediction;
- Histogram m_explicit_training_mask;
-
- // For profiling possibly conflicting requests
- Map<Address, Time>* m_conflicting_map_ptr;
- Histogram m_conflicting_histogram;
-
- Histogram m_outstanding_requests;
- Histogram m_outstanding_persistent_requests;
-
- Histogram m_average_latency_estimate;
-
- Map<Address, int>* m_watch_address_list_ptr;
- // counts all initiated cache request including PUTs
- int m_requests;
- Map <string, int>* m_requestProfileMap_ptr;
-
- // added for MemoryControl:
- long long int m_memReq;
- long long int m_memBankBusy;
- long long int m_memBusBusy;
- long long int m_memTfawBusy;
- long long int m_memReadWriteBusy;
- long long int m_memDataBusBusy;
- long long int m_memRefresh;
- long long int m_memRead;
- long long int m_memWrite;
- long long int m_memWaitCycles;
- long long int m_memInputQ;
- long long int m_memBankQ;
- long long int m_memArbWait;
- long long int m_memRandBusy;
- long long int m_memNotOld;
- Vector<long long int> m_memBankCount;
+ //added by SS
+ vector<string> m_memory_control_names;
+ // Private Methods
+ void addL2StatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
+ void addL1DStatSample(const CacheMsg& msg, NodeID id);
+ void addL1IStatSample(const CacheMsg& msg, NodeID id);
+
+ GenericRequestType CacheRequestType_to_GenericRequestType(const CacheRequestType& type);
+
+ // Private copy constructor and assignment operator
+ Profiler(const Profiler& obj);
+ Profiler& operator=(const Profiler& obj);
+
+ // Data Members (m_ prefix)
+ CacheProfiler* m_L1D_cache_profiler_ptr;
+ CacheProfiler* m_L1I_cache_profiler_ptr;
+ CacheProfiler* m_L2_cache_profiler_ptr;
+ AddressProfiler* m_address_profiler_ptr;
+ AddressProfiler* m_inst_profiler_ptr;
+
+ Vector<int64> m_instructions_executed_at_start;
+ Vector<int64> m_cycles_executed_at_start;
+
+ ostream* m_periodic_output_file_ptr;
+ integer_t m_stats_period;
+
+ Time m_ruby_start;
+ time_t m_real_time_start_time;
+
+ int m_num_BA_unicasts;
+ int m_num_BA_broadcasts;
+
+ Vector<integer_t> m_perProcTotalMisses;
+ Vector<integer_t> m_perProcUserMisses;
+ Vector<integer_t> m_perProcSupervisorMisses;
+ Vector<integer_t> m_perProcStartTransaction;
+ Vector<integer_t> m_perProcEndTransaction;
+ Vector < Vector < integer_t > > m_busyControllerCount;
+ integer_t m_busyBankCount;
+ Histogram m_multicast_retry_histogram;
+
+ Histogram m_L1tbeProfile;
+ Histogram m_L2tbeProfile;
+ Histogram m_stopTableProfile;
+
+ Histogram m_filter_action_histogram;
+ Histogram m_tbeProfile;
+
+ Histogram m_sequencer_requests;
+ Histogram m_store_buffer_size;
+ Histogram m_store_buffer_blocks;
+ Histogram m_read_sharing_histogram;
+ Histogram m_write_sharing_histogram;
+ Histogram m_all_sharing_histogram;
+ int64 m_cache_to_cache;
+ int64 m_memory_to_cache;
+
+ Histogram m_prefetchWaitHistogram;
+
+ Vector<Histogram> m_missLatencyHistograms;
+ Vector<Histogram> m_machLatencyHistograms;
+ Histogram m_L2MissLatencyHistogram;
+ Histogram m_allMissLatencyHistogram;
+
+ Histogram m_allSWPrefetchLatencyHistogram;
+ Histogram m_SWPrefetchL2MissLatencyHistogram;
+ Vector<Histogram> m_SWPrefetchLatencyHistograms;
+ Vector<Histogram> m_SWPrefetchMachLatencyHistograms;
+
+ Histogram m_delayedCyclesHistogram;
+ Histogram m_delayedCyclesNonPFHistogram;
+ Vector<Histogram> m_delayedCyclesVCHistograms;
+
+ int m_predictions;
+ int m_predictionOpportunities;
+ int m_goodPredictions;
+
+ Histogram m_gets_mask_prediction;
+ Histogram m_getx_mask_prediction;
+ Histogram m_explicit_training_mask;
+
+ // For profiling possibly conflicting requests
+ Map<Address, Time>* m_conflicting_map_ptr;
+ Histogram m_conflicting_histogram;
+
+ Histogram m_outstanding_requests;
+ Histogram m_outstanding_persistent_requests;
+
+ Histogram m_average_latency_estimate;
+
+ Map<Address, int>* m_watch_address_list_ptr;
+ // counts all initiated cache requests including PUTs
+ int m_requests;
+ Map <string, int>* m_requestProfileMap_ptr;
+
+ // added for MemoryControl:
+ //added by SS
+ map< string, memory_control_profiler* > m_memory_control_profilers;
+
+ //added by SS
+ bool m_hot_lines;
+ bool m_all_instructions;
+ string m_name;
};
@@ -312,9 +320,9 @@ ostream& operator<<(ostream& out, const Profiler& obj);
extern inline
ostream& operator<<(ostream& out, const Profiler& obj)
{
- obj.print(out);
- out << flush;
- return out;
+ obj.print(out);
+ out << flush;
+ return out;
}
#endif //PROFILER_H
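The extern inline operator<< above simply forwards to print(); the same print()/operator<< pairing appears on Topology and the other components touched by this change, so each of them can be streamed to any ostream. A stripped-down version of the idiom:

    #include <iostream>

    class Widget {
    public:
        void print(std::ostream& out) const { out << "[Widget]"; }
    };

    inline std::ostream& operator<<(std::ostream& out, const Widget& obj)
    {
        obj.print(out);
        out << std::flush;
        return out;
    }

    // usage: Widget w; std::cout << w << std::endl;
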
diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc
index 21193ba68..672c175e3 100644
--- a/src/mem/ruby/recorder/CacheRecorder.cc
+++ b/src/mem/ruby/recorder/CacheRecorder.cc
@@ -36,44 +36,40 @@
#include "mem/ruby/recorder/TraceRecord.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/gems_common/PrioHeap.hh"
+#include "gzstream.hh"
CacheRecorder::CacheRecorder()
{
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
- // m_records_ptr = new PrioHeap<TraceRecord>;
+ m_records_ptr = new PrioHeap<TraceRecord>;
}
CacheRecorder::~CacheRecorder()
{
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
- // delete m_records_ptr;
+ delete m_records_ptr;
}
-void CacheRecorder::addRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
+void CacheRecorder::addRecord(const string & sequencer_name, const Address& data_addr, const Address& pc_addr, RubyRequestType type, Time time)
{
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
- // m_records_ptr->insert(TraceRecord(id, data_addr, pc_addr, type, time));
+ m_records_ptr->insert(TraceRecord(sequencer_name, data_addr, pc_addr, type, time));
}
int CacheRecorder::dumpRecords(string filename)
{
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
- // ogzstream out(filename.c_str());
- // if (out.fail()) {
- // cout << "Error: error opening file '" << filename << "'" << endl;
- // return 0;
- // }
+ ogzstream out(filename.c_str());
+ if (out.fail()) {
+ cout << "Error: error opening file '" << filename << "'" << endl;
+ return 0;
+ }
- // int counter = 0;
- // while (m_records_ptr->size() != 0) {
- // TraceRecord record = m_records_ptr->extractMin();
- // record.output(out);
- // counter++;
- // }
- // return counter;
+ int counter = 0;
+ while (m_records_ptr->size() != 0) {
+ TraceRecord record = m_records_ptr->extractMin();
+ record.output(out);
+ counter++;
+ }
+ return counter;
}
void CacheRecorder::print(ostream& out) const
{
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
}
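For illustration only (not part of the patch): a minimal sketch of driving the re-enabled CacheRecorder, assuming the addRecord/dumpRecords signatures introduced above; the sequencer name "Sequencer_0", the addresses, and the output filename are hypothetical.

    CacheRecorder recorder;
    Address data_addr, pc_addr;
    data_addr.setAddress(0x1000);               // Address::setAddress as used elsewhere in Ruby
    pc_addr.setAddress(0x2000);
    recorder.addRecord("Sequencer_0", data_addr, pc_addr, RubyRequestType_ST, 0 /* time */);
    int written = recorder.dumpRecords("trace.gz");  // records are written gzip-compressed via ogzstream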
diff --git a/src/mem/ruby/recorder/CacheRecorder.hh b/src/mem/ruby/recorder/CacheRecorder.hh
index 0f69f8478..144e841b3 100644
--- a/src/mem/ruby/recorder/CacheRecorder.hh
+++ b/src/mem/ruby/recorder/CacheRecorder.hh
@@ -38,6 +38,8 @@
#ifndef CACHERECORDER_H
#define CACHERECORDER_H
+#include "mem/ruby/libruby_internal.hh"
+
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/protocol/CacheRequestType.hh"
@@ -55,7 +57,7 @@ public:
~CacheRecorder();
// Public Methods
- void addRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time);
+ void addRecord(const string & sequencer_name, const Address& data_addr, const Address& pc_addr, RubyRequestType type, Time time);
int dumpRecords(string filename);
void print(ostream& out) const;
diff --git a/src/mem/ruby/recorder/TraceRecord.cc b/src/mem/ruby/recorder/TraceRecord.cc
index 31b83690e..1521d2a3f 100644
--- a/src/mem/ruby/recorder/TraceRecord.cc
+++ b/src/mem/ruby/recorder/TraceRecord.cc
@@ -35,13 +35,11 @@
#include "mem/ruby/recorder/TraceRecord.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/protocol/CacheMsg.hh"
-#include "mem/packet.hh"
-TraceRecord::TraceRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
+TraceRecord::TraceRecord(const string & sequencer_name, const Address& data_addr, const Address& pc_addr, RubyRequestType type, Time time)
{
- m_node_num = id;
+ m_sequencer_name = sequencer_name;
m_data_address = data_addr;
m_pc_address = pc_addr;
m_time = time;
@@ -49,8 +47,8 @@ TraceRecord::TraceRecord(NodeID id, const Address& data_addr, const Address& pc_
// Don't differentiate between store misses and atomic requests in
// the trace
- if (m_type == CacheRequestType_ATOMIC) {
- m_type = CacheRequestType_ST;
+ if (m_type == RubyRequestType_RMW) {
+ m_type = RubyRequestType_ST;
}
}
@@ -62,7 +60,7 @@ TraceRecord::TraceRecord(const TraceRecord& obj)
TraceRecord& TraceRecord::operator=(const TraceRecord& obj)
{
- m_node_num = obj.m_node_num;
+ m_sequencer_name = obj.m_sequencer_name;
m_time = obj.m_time;
m_data_address = obj.m_data_address;
m_pc_address = obj.m_pc_address;
@@ -74,32 +72,17 @@ void TraceRecord::issueRequest() const
{
// Lookup sequencer pointer from system
// Note that the chip index also needs to take into account SMT configurations
- AbstractChip* chip_ptr = g_system_ptr->getChip(m_node_num/RubyConfig::numberOfProcsPerChip()/RubyConfig::numberofSMTThreads());
- assert(chip_ptr != NULL);
- Sequencer* sequencer_ptr = chip_ptr->getSequencer((m_node_num/RubyConfig::numberofSMTThreads())%RubyConfig::numberOfProcsPerChip());
+ Sequencer* sequencer_ptr = RubySystem::getSequencer(m_sequencer_name);
assert(sequencer_ptr != NULL);
- Addr data_addr = m_data_address.getAddress();
- Addr pc_addr = m_pc_address.getAddress();
- Request request(0, data_addr, 0, Flags<unsigned int>(Request::PREFETCH), pc_addr, m_node_num, 0);
- MemCmd::Command command;
- if (m_type == CacheRequestType_LD || m_type == CacheRequestType_IFETCH)
- command = MemCmd::ReadReq;
- else if (m_type == CacheRequestType_ST)
- command = MemCmd::WriteReq;
- else if (m_type == CacheRequestType_ATOMIC)
- command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
- else
- assert(false);
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+ RubyRequest request(m_data_address.getAddress(), NULL, RubySystem::getBlockSizeBytes(), m_pc_address.getAddress(), m_type, RubyAccessMode_User);
// Clear out the sequencer
while (!sequencer_ptr->empty()) {
g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
}
- sequencer_ptr->makeRequest(&pkt);
+ sequencer_ptr->makeRequest(request);
// Clear out the sequencer
while (!sequencer_ptr->empty()) {
@@ -109,12 +92,12 @@ void TraceRecord::issueRequest() const
void TraceRecord::print(ostream& out) const
{
- out << "[TraceRecord: Node, " << m_node_num << ", " << m_data_address << ", " << m_pc_address << ", " << m_type << ", Time: " << m_time << "]";
+ out << "[TraceRecord: Node, " << m_sequencer_name << ", " << m_data_address << ", " << m_pc_address << ", " << m_type << ", Time: " << m_time << "]";
}
void TraceRecord::output(ostream& out) const
{
- out << m_node_num << " ";
+ out << m_sequencer_name << " ";
m_data_address.output(out);
out << " ";
m_pc_address.output(out);
@@ -125,13 +108,13 @@ void TraceRecord::output(ostream& out) const
bool TraceRecord::input(istream& in)
{
- in >> m_node_num;
+ in >> m_sequencer_name;
m_data_address.input(in);
m_pc_address.input(in);
string type;
if (!in.eof()) {
in >> type;
- m_type = string_to_CacheRequestType(type);
+ m_type = string_to_RubyRequestType(type);
// Ignore the rest of the line
char c = '\0';
diff --git a/src/mem/ruby/recorder/TraceRecord.hh b/src/mem/ruby/recorder/TraceRecord.hh
index cfe2ff9e3..081d392e5 100644
--- a/src/mem/ruby/recorder/TraceRecord.hh
+++ b/src/mem/ruby/recorder/TraceRecord.hh
@@ -39,17 +39,18 @@
#ifndef TRACERECORD_H
#define TRACERECORD_H
+#include "mem/ruby/libruby_internal.hh"
+
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "mem/protocol/CacheRequestType.hh"
class CacheMsg;
class TraceRecord {
public:
// Constructors
- TraceRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time);
- TraceRecord() { m_node_num = 0; m_time = 0; m_type = CacheRequestType_NULL; }
+ TraceRecord(const string & sequencer_name, const Address& data_addr, const Address& pc_addr, RubyRequestType type, Time time);
+ TraceRecord() { m_sequencer_name = ""; m_time = 0; m_type = RubyRequestType_NULL; }
// Destructor
// ~TraceRecord();
@@ -69,11 +70,11 @@ private:
// Private Methods
// Data Members (m_ prefix)
- NodeID m_node_num;
+ string m_sequencer_name;
Time m_time;
Address m_data_address;
Address m_pc_address;
- CacheRequestType m_type;
+ RubyRequestType m_type;
};
inline extern bool node_less_then_eq(const TraceRecord& n1, const TraceRecord& n2);
diff --git a/src/mem/ruby/recorder/Tracer.cc b/src/mem/ruby/recorder/Tracer.cc
index 896bfd31e..d2df544d8 100644
--- a/src/mem/ruby/recorder/Tracer.cc
+++ b/src/mem/ruby/recorder/Tracer.cc
@@ -37,16 +37,41 @@
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/gems_common/PrioHeap.hh"
#include "mem/ruby/system/System.hh"
+#include "mem/ruby/config/RubyConfig.hh"
-Tracer::Tracer()
+//added by SS
+Tracer::Tracer(const string & name)
{
+ m_name = name;
m_enabled = false;
}
+//commented by SS
+//Tracer::Tracer()
+//{
+// m_enabled = false;
+//}
+
Tracer::~Tracer()
{
}
+void Tracer::init(const vector<string> & argv)
+{
+ m_warmup_length = 0;
+
+ for (size_t i=0; i<argv.size(); i+=2) {
+ if ( argv[i] == "warmup_length") {
+ m_warmup_length = atoi(argv[i+1].c_str());
+ }
+ else {
+      cerr << "WARNING: Tracer: Unknown configuration parameter: " << argv[i] << endl;
+ assert(false);
+ }
+ }
+ assert(m_warmup_length > 0);
+}
+
void Tracer::startTrace(string filename)
{
if (m_enabled) {
@@ -73,10 +98,10 @@ void Tracer::stopTrace()
m_enabled = false;
}
-void Tracer::traceRequest(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
+void Tracer::traceRequest(const string & sequencer_name, const Address& data_addr, const Address& pc_addr, RubyRequestType type, Time time)
{
assert(m_enabled == true);
- TraceRecord tr(id, data_addr, pc_addr, type, time);
+ TraceRecord tr(sequencer_name, data_addr, pc_addr, type, time);
tr.output(m_trace_file);
}
@@ -104,10 +129,16 @@ int Tracer::playbackTrace(string filename)
ok = record.input(in);
// Clear the statistics after warmup
- if (counter == g_trace_warmup_length) {
- cout << "Clearing stats after warmup of length " << g_trace_warmup_length << endl;
+/* if (counter == RubyConfig::getTraceWarmupLength()) {
+ cout << "Clearing stats after warmup of length " << RubyConfig::getTraceWarmupLength() << endl;
g_system_ptr->clearStats();
}
+*/
+ if (counter == m_warmup_length) {
+ cout << "Clearing stats after warmup of length " << m_warmup_length << endl;
+ g_system_ptr->clearStats();
+ }
+
}
// Flush the prefetches through the system
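For illustration only (not part of the patch): a sketch of the new per-instance Tracer flow, where configuration arrives as key/value pairs through init() and a non-zero warmup_length must be supplied; the instance name "tracer0", the warmup value, and the filename are hypothetical.

    Tracer tracer("tracer0");
    vector<string> args;
    args.push_back("warmup_length");
    args.push_back("1000000");
    tracer.init(args);                        // asserts if warmup_length is missing or zero
    tracer.startTrace("warmup_trace.gz");
    // ... record requests via traceRequest(sequencer_name, data_addr, pc_addr, type, time) ...
    tracer.stopTrace();
    int replayed = tracer.playbackTrace("warmup_trace.gz");  // clears stats once m_warmup_length records are replayed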
diff --git a/src/mem/ruby/recorder/Tracer.hh b/src/mem/ruby/recorder/Tracer.hh
index 0f78f54b2..27a1c95e1 100644
--- a/src/mem/ruby/recorder/Tracer.hh
+++ b/src/mem/ruby/recorder/Tracer.hh
@@ -38,6 +38,8 @@
#ifndef TRACER_H
#define TRACER_H
+#include "mem/ruby/libruby_internal.hh"
+
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/protocol/CacheRequestType.hh"
@@ -50,7 +52,8 @@ class TraceRecord;
class Tracer {
public:
// Constructors
- Tracer();
+// Tracer();
+ Tracer(const string & name);
// Destructor
~Tracer();
@@ -59,12 +62,13 @@ public:
void startTrace(string filename);
void stopTrace();
bool traceEnabled() { return m_enabled; }
- void traceRequest(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time);
+ void traceRequest(const string & sequencer_name, const Address& data_addr, const Address& pc_addr, RubyRequestType type, Time time);
void print(ostream& out) const;
// Public Class Methods
- static int playbackTrace(string filename);
+ int playbackTrace(string filename);
+ void init(const vector<string> & argv);
private:
// Private Methods
@@ -75,6 +79,10 @@ private:
// Data Members (m_ prefix)
ogzstream m_trace_file;
bool m_enabled;
+
+ //added by SS
+ int m_warmup_length;
+ string m_name;
};
// Output operator declaration
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
index 60ca412ad..83039a9b0 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
@@ -37,6 +37,8 @@
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
AbstractCacheEntry::AbstractCacheEntry() {
+ m_Address.setAddress(0);
+ m_Permission = AccessPermission_NotPresent;
}
// still need to define destructor for subclasses
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
index 18dc16ca8..be1f14b05 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
@@ -41,6 +41,8 @@
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/AccessPermission.hh"
+class DataBlock;
+
class AbstractCacheEntry {
public:
// Constructors
@@ -53,6 +55,7 @@ public:
// The methods below are those called by ruby runtime, add when it is
// absolutely necessary and should all be virtual function.
+ virtual DataBlock& getDataBlk() = 0;
virtual void print(ostream& out) const = 0;
diff --git a/src/mem/ruby/slicc_interface/AbstractChip.hh b/src/mem/ruby/slicc_interface/AbstractChip.hh
deleted file mode 100644
index d47dd6306..000000000
--- a/src/mem/ruby/slicc_interface/AbstractChip.hh
+++ /dev/null
@@ -1,126 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description: Common base class for a machine chip.
- *
- */
-
-#ifndef ABSTRACT_CHIP_H
-#define ABSTRACT_CHIP_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/protocol/L1Cache_Entry.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/gems_common/Vector.hh"
-
-class Network;
-class Sequencer;
-class StoreBuffer;
-class ENTRY;
-class MessageBuffer;
-class CacheRecorder;
-class TransactionInterfaceManager;
-
-template<class ENTRY> class CacheMemory;
-
-class AbstractChip {
-public:
- // Constructors
- AbstractChip(NodeID chip_number, Network* net_ptr);
-
- // Destructor, prevent from being instantiated
- virtual ~AbstractChip() = 0;
-
- // Public Methods
- NodeID getID() const { return m_id; };
- Network* getNetwork() const { return m_net_ptr; };
- Sequencer* getSequencer(int index) const { return m_L1Cache_sequencer_vec[index]; };
- TransactionInterfaceManager* getTransactionInterfaceManager(int index) const { return m_L1Cache_xact_mgr_vec[index]; };
- void setTransactionInterfaceManager(TransactionInterfaceManager* manager, int index) { m_L1Cache_xact_mgr_vec[index] = manager; }
-
- // used when CHECK_COHERENCE is enabled. See RubySystem::checkGlobalCoherence()
- virtual bool isBlockExclusive(const Address& addr) const { return false; }
- virtual bool isBlockShared(const Address& addr) const { return false; }
-
- // cache dump functions
- virtual void recordCacheContents(CacheRecorder& tr) const = 0;
- virtual void dumpCaches(ostream& out) const = 0;
- virtual void dumpCacheData(ostream& out) const = 0;
-
- virtual void printConfig(ostream& out) = 0;
- virtual void print(ostream& out) const = 0;
-
- // pulic data structures
- Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_L1DcacheMemory_vec;
- Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_L1IcacheMemory_vec;
- Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_cacheMemory_vec;
- Vector < CacheMemory<L1Cache_Entry>* > m_L1Cache_L2cacheMemory_vec;
- Vector < CacheMemory<L1Cache_Entry>* > m_L2Cache_L2cacheMemory_vec;
-
- // added so that the prefetcher and sequencer can access the L1 and L2 request queues
- Vector < MessageBuffer* > m_L1Cache_optionalQueue_vec;
- Vector < MessageBuffer* >m_L1Cache_mandatoryQueue_vec;
-
- // TSO storebuffer
- Vector < StoreBuffer* > m_L1Cache_storeBuffer_vec;
-
- // TM transaction manager
- Vector < TransactionInterfaceManager* > m_L1Cache_xact_mgr_vec;
-
-protected:
-
- // Data Members (m_ prefix)
- NodeID m_id; // Chip id
- Network* m_net_ptr; // Points to the Network simulator
- Vector < Sequencer* > m_L1Cache_sequencer_vec; // All chip should have a sequencer
-
-
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const AbstractChip& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const AbstractChip& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //ABSTRACT_CHIP_H
-
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
new file mode 100644
index 000000000..3a93cc745
--- /dev/null
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -0,0 +1,33 @@
+
+#ifndef ABSTRACTCONTROLLER_H
+#define ABSTRACTCONTROLLER_H
+
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/protocol/MachineType.hh"
+
+class MessageBuffer;
+class Network;
+
+class AbstractController : public Consumer {
+public:
+ AbstractController() {}
+ virtual void init(Network* net_ptr, const vector<string> & argv) = 0;
+
+ // returns the number of controllers created of the specific subtype
+ // virtual int getNumberOfControllers() const = 0;
+ virtual MessageBuffer* getMandatoryQueue() const = 0;
+ virtual const int & getVersion() const = 0;
+ virtual const string toString() const = 0; // returns text version of controller type
+ virtual const string getName() const = 0; // return instance name
+ virtual const MachineType getMachineType() const = 0;
+
+ virtual void print(ostream & out) const = 0;
+ virtual void printStats(ostream & out) const = 0;
+ virtual void printConfig(ostream & out) const = 0;
+ virtual void wakeup() = 0;
+ // virtual void dumpStats(ostream & out) = 0;
+ virtual void clearStats() = 0;
+
+};
+
+#endif
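For illustration only (not part of the patch): the smallest controller shape the new interface admits. "DummyController" and its members are hypothetical; in practice the SLICC-generated controllers are the real implementers of AbstractController.

    class DummyController : public AbstractController {
    public:
      void init(Network* net_ptr, const vector<string> & argv) { m_version = 0; }
      MessageBuffer* getMandatoryQueue() const { return NULL; }
      const int & getVersion() const { return m_version; }
      const string toString() const { return "DummyController"; }
      const string getName() const { return "dummy-0"; }
      const MachineType getMachineType() const { return MachineType_L1Cache; }
      void print(ostream & out) const { out << "[DummyController]"; }
      void printStats(ostream & out) const { }
      void printConfig(ostream & out) const { }
      void wakeup() { }
      void clearStats() { }
    private:
      int m_version;
    };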
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh b/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh
index bf5778479..9ece7ae65 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh
+++ b/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh
@@ -42,6 +42,7 @@
#include "mem/ruby/common/Set.hh"
#include "mem/ruby/common/NetDest.hh"
#include "mem/protocol/GenericMachineType.hh"
+#include "mem/ruby/system/DirectoryMemory.hh"
#ifdef MACHINETYPE_L1Cache
#define MACHINETYPE_L1CACHE_ENUM MachineType_L1Cache
@@ -61,44 +62,22 @@
#define MACHINETYPE_L3CACHE_ENUM MachineType_NUM
#endif
+/*
#ifdef MACHINETYPE_PersistentArbiter
#define MACHINETYPE_PERSISTENTARBITER_ENUM MachineType_PersistentArbiter
#else
#define MACHINETYPE_PERSISTENTARBITER_ENUM MachineType_NUM
#endif
-
-#ifdef MACHINETYPE_Collector
-#define MACHINETYPE_COLLECTOR_ENUM MachineType_Collector
-#else
-#define MACHINETYPE_COLLECTOR_ENUM MachineType_NUM
-#endif
-
-
-// used to determine the correct L1 set
-// input parameters are the address and number of set bits for the L1 cache
-// returns a value between 0 and the total number of L1 cache sets
-inline
-int map_address_to_L1CacheSet(const Address& addr, int cache_num_set_bits)
-{
- return addr.bitSelect(RubyConfig::dataBlockBits(),
- RubyConfig::dataBlockBits()+cache_num_set_bits-1);
-}
-
-// used to determine the correct L2 set
-// input parameters are the address and number of set bits for the L2 cache
-// returns a value between 0 and the total number of L2 cache sets
-inline
-int map_address_to_L2CacheSet(const Address& addr, int cache_num_set_bits)
+*/
+/*
+inline MachineID map_Address_to_L2Cache(const Address & addr)
{
- assert(cache_num_set_bits == L2_CACHE_NUM_SETS_BITS); // ensure the l2 bank mapping functions agree with l2 set bits
-
- if (MAP_L2BANKS_TO_LOWEST_BITS) {
- return addr.bitSelect(RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits(),
- RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits()+cache_num_set_bits-1);
- } else {
- return addr.bitSelect(RubyConfig::dataBlockBits(),
- RubyConfig::dataBlockBits()+cache_num_set_bits-1);
- }
+ int L2bank = 0;
+ MachineID mach = {MACHINETYPE_L2CACHE_ENUM, 0};
+ L2bank = addr.bitSelect(RubySystem::getBlockSizeBits(),
+ RubySystem::getBlockSizeBits() + RubyConfig::getNumberOfCachesPerLevel(2)-1);
+ mach.num = L2bank;
+ return mach;
}
// input parameter is the base ruby node of the L1 cache
@@ -106,16 +85,18 @@ int map_address_to_L2CacheSet(const Address& addr, int cache_num_set_bits)
inline
MachineID map_L1CacheMachId_to_L2Cache(const Address& addr, MachineID L1CacheMachId)
{
+ return map_Address_to_L2Cache(addr);
+
int L2bank = 0;
MachineID mach = {MACHINETYPE_L2CACHE_ENUM, 0};
if (RubyConfig::L2CachePerChipBits() > 0) {
- if (MAP_L2BANKS_TO_LOWEST_BITS) {
- L2bank = addr.bitSelect(RubyConfig::dataBlockBits(),
- RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits()-1);
+ if (RubyConfig::getMAP_L2BANKS_TO_LOWEST_BITS()) {
+ L2bank = addr.bitSelect(RubySystem::getBlockSizeBits(),
+ RubySystem::getBlockSizeBits()+RubyConfig::L2CachePerChipBits()-1);
} else {
- L2bank = addr.bitSelect(RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS,
- RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS+RubyConfig::L2CachePerChipBits()-1);
+ L2bank = addr.bitSelect(RubySystem::getBlockSizeBits()+RubyConfig::getL2_CACHE_NUM_SETS_BITS(),
+ RubySystem::getBlockSizeBits()+RubyConfig::getL2_CACHE_NUM_SETS_BITS()+RubyConfig::L2CachePerChipBits()-1);
}
}
@@ -126,72 +107,38 @@ MachineID map_L1CacheMachId_to_L2Cache(const Address& addr, MachineID L1CacheMac
+ L2bank; // bank #
assert(mach.num < RubyConfig::numberOfL2Cache());
return mach;
+
}
+
// used to determine the correct L2 bank
// input parameter is the base ruby node of the L2 cache
// returns a value between 0 and total_L2_Caches_within_the_system
+
inline
MachineID map_L2ChipId_to_L2Cache(const Address& addr, NodeID L2ChipId)
{
+ return map_Address_to_L2Cache(addr);
+
assert(L2ChipId < RubyConfig::numberOfChips());
int L2bank = 0;
MachineID mach = {MACHINETYPE_L2CACHE_ENUM, 0};
+ L2bank = addr.bitSelect(RubySystem::getBlockSizeBits(),
+ RubySystem::getBlockSizeBits() + RubyConfig::numberOfCachesPerLevel(2)-1);
+ mach.num = L2bank;
+    return mach;
- if (RubyConfig::L2CachePerChipBits() > 0) {
- if (MAP_L2BANKS_TO_LOWEST_BITS) {
- L2bank = addr.bitSelect(RubyConfig::dataBlockBits(),
- RubyConfig::dataBlockBits()+RubyConfig::L2CachePerChipBits()-1);
- } else {
- L2bank = addr.bitSelect(RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS,
- RubyConfig::dataBlockBits()+L2_CACHE_NUM_SETS_BITS+RubyConfig::L2CachePerChipBits()-1);
- }
- }
-
- assert(L2bank < RubyConfig::numberOfL2CachePerChip());
- assert(L2bank >= 0);
-
- mach.num = L2ChipId*RubyConfig::numberOfL2CachePerChip() // base #
- + L2bank; // bank #
- assert(mach.num < RubyConfig::numberOfL2Cache());
- return mach;
}
+ */
+
// used to determine the home directory
// returns a value between 0 and total_directories_within_the_system
inline
NodeID map_Address_to_DirectoryNode(const Address& addr)
{
- NodeID dirNode = 0;
-
- if (RubyConfig::memoryBits() > 0) {
- dirNode = addr.bitSelect(RubyConfig::dataBlockBits(),
- RubyConfig::dataBlockBits()+RubyConfig::memoryBits()-1);
- }
-
- // Index indexHighPortion = address.bitSelect(MEMORY_SIZE_BITS-1, PAGE_SIZE_BITS+NUMBER_OF_MEMORY_MODULE_BITS);
- // Index indexLowPortion = address.bitSelect(DATA_BLOCK_BITS, PAGE_SIZE_BITS-1);
-
- //Index index = indexLowPortion | (indexHighPortion << (PAGE_SIZE_BITS - DATA_BLOCK_BITS));
-
-/*
-
-ADDRESS_WIDTH MEMORY_SIZE_BITS PAGE_SIZE_BITS DATA_BLOCK_BITS
- | | | |
- \ / \ / \ / \ / 0
- -----------------------------------------------------------------------
- | unused |xxxxxxxxxxxxxxx| |xxxxxxxxxxxxxxx| |
- | |xxxxxxxxxxxxxxx| |xxxxxxxxxxxxxxx| |
- -----------------------------------------------------------------------
- indexHighPortion indexLowPortion
- <------->
- NUMBER_OF_MEMORY_MODULE_BITS
- */
-
- assert(dirNode < RubyConfig::numberOfMemories());
- assert(dirNode >= 0);
- return dirNode;
+ return DirectoryMemory::mapAddressToDirectoryVersion(addr);
}
// used to determine the home directory
@@ -204,29 +151,13 @@ MachineID map_Address_to_Directory(const Address &addr)
}
inline
-MachineID map_Address_to_CentralArbiterNode(const Address& addr)
-{
- MachineType t = MACHINETYPE_PERSISTENTARBITER_ENUM;
- MachineID mach = {t, map_Address_to_DirectoryNode(addr)};
-
- assert(mach.num < RubyConfig::numberOfMemories());
- assert(mach.num >= 0);
- return mach;
-}
-
-inline
-NetDest getMultiStaticL2BankNetDest(const Address& addr, const Set& sharers) // set of L2RubyNodes
+MachineID map_Address_to_DMA(const Address & addr)
{
- NetDest dest;
-
- for (int i = 0; i < sharers.getSize(); i++) {
- if (sharers.isElement(i)) {
- dest.add(map_L2ChipId_to_L2Cache(addr,i));
- }
- }
- return dest;
+ MachineID dma = {MachineType_DMA, 0};
+ return dma;
}
+/*
inline
NetDest getOtherLocalL1IDs(MachineID L1)
{
@@ -244,119 +175,7 @@ NetDest getOtherLocalL1IDs(MachineID L1)
return ret;
}
-
-inline
-NetDest getLocalL1IDs(MachineID mach)
-{
- assert(MACHINETYPE_L1CACHE_ENUM != MachineType_NUM);
-
- NetDest ret;
-
- if (mach.type == MACHINETYPE_L1CACHE_ENUM) {
-
- int start = (mach.num / RubyConfig::numberOfL1CachePerChip()) * RubyConfig::numberOfProcsPerChip();
-
- for (int i = start; i < (start + RubyConfig::numberOfProcsPerChip()); i++) {
- MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
- ret.add( mach );
- }
- }
- else if (mach.type == MACHINETYPE_L2CACHE_ENUM) {
-
- int chip = mach.num/RubyConfig::numberOfL2CachePerChip();
- int start = ( chip*RubyConfig::numberOfL1CachePerChip());
- for (int i = start; i < (start + RubyConfig::numberOfL1CachePerChip()); i++) {
- MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
- ret.add( mach );
- }
- }
-
- return ret;
-}
-
-inline
-NetDest getExternalL1IDs(MachineID L1)
-{
- NetDest ret;
-
- assert(MACHINETYPE_L1CACHE_ENUM != MachineType_NUM);
-
- for (int i = 0; i < RubyConfig::numberOfProcessors(); i++) {
- // ret.add( (NodeID) i);
- MachineID mach = { MACHINETYPE_L1CACHE_ENUM, i };
- ret.add( mach );
- }
-
- ret.removeNetDest(getLocalL1IDs(L1));
-
- return ret;
-}
-
-inline
-bool isLocalProcessor(MachineID thisId, MachineID tarID)
-{
- int start = (thisId.num / RubyConfig::numberOfProcsPerChip()) * RubyConfig::numberOfProcsPerChip();
-
- for (int i = start; i < (start + RubyConfig::numberOfProcsPerChip()); i++) {
- if (i == tarID.num) {
- return true;
- }
- }
- return false;
-}
-
-
-inline
-NetDest getAllPertinentL2Banks(const Address& addr) // set of L2RubyNodes
-{
- NetDest dest;
-
- for (int i = 0; i < RubyConfig::numberOfChips(); i++) {
- dest.add(map_L2ChipId_to_L2Cache(addr,i));
- }
- return dest;
-}
-
-inline
-bool isL1OnChip(MachineID L1machID, NodeID L2NodeID)
-{
- if (L1machID.type == MACHINETYPE_L1CACHE_ENUM) {
- return (L1machID.num == L2NodeID);
- } else {
- return false;
- }
-}
-
-inline
-bool isL2OnChip(MachineID L2machID, NodeID L2NodeID)
-{
- if (L2machID.type == MACHINETYPE_L2CACHE_ENUM) {
- return (L2machID.num == L2NodeID);
- } else {
- return false;
- }
-}
-
-inline
-NodeID closest_clockwise_distance(NodeID this_node, NodeID next_node)
-{
- if (this_node <= next_node) {
- return (next_node - this_node);
- } else {
- return (next_node - this_node + RubyConfig::numberOfChips());
- }
-}
-
-inline
-bool closer_clockwise_processor(NodeID this_node, NodeID newer, NodeID older)
-{
- return (closest_clockwise_distance(this_node, newer) < closest_clockwise_distance(this_node, older));
-}
-
-extern inline NodeID getChipID(MachineID L2machID)
-{
- return (L2machID.num%RubyConfig::numberOfChips())/RubyConfig::numberOfProcsPerChip();
-}
+*/
extern inline NodeID machineIDToNodeID(MachineID machID)
{
@@ -364,11 +183,6 @@ extern inline NodeID machineIDToNodeID(MachineID machID)
return machID.num;
}
-extern inline NodeID machineIDToVersion(MachineID machID)
-{
- return machID.num/RubyConfig::numberOfChips();
-}
-
extern inline MachineType machineIDToMachineType(MachineID machID)
{
return machID.type;
@@ -379,25 +193,22 @@ extern inline NodeID L1CacheMachIDToProcessorNum(MachineID machID)
assert(machID.type == MachineType_L1Cache);
return machID.num;
}
-
+/*
extern inline NodeID L2CacheMachIDToChipID(MachineID machID)
{
assert(machID.type == MACHINETYPE_L2CACHE_ENUM);
- return machID.num/RubyConfig::numberOfL2CachePerChip();
-}
-
-extern inline MachineID getCollectorDest(MachineID L1MachID)
-{
- MachineID mach = {MACHINETYPE_COLLECTOR_ENUM, L1MachID.num};
- return mach;
-}
-
-extern inline MachineID getCollectorL1Cache(MachineID colID)
-{
- MachineID mach = {MACHINETYPE_L1CACHE_ENUM, colID.num};
- return mach;
+ int L2bank = machID.num;
+ int banks_seen = 0;
+ for (int i=0;i<RubyConfig::getNumberOfChips();i++) {
+ for (int j=0;j<RubyConfig::getNumberOfCachesPerLevelPerChip(2,i);j++) {
+ if (banks_seen == L2bank)
+ return i;
+ banks_seen++;
+ }
+ }
+ assert(0);
}
-
+*/
extern inline MachineID getL1MachineID(NodeID L1RubyNode)
{
MachineID mach = {MACHINETYPE_L1CACHE_ENUM, L1RubyNode};
@@ -413,8 +224,6 @@ extern inline GenericMachineType ConvertMachToGenericMach(MachineType machType)
return GenericMachineType_L3Cache;
} else if (machType == MachineType_Directory) {
return GenericMachineType_Directory;
- } else if (machType == MACHINETYPE_COLLECTOR_ENUM) {
- return GenericMachineType_Collector;
} else {
ERROR_MSG("cannot convert to a GenericMachineType");
return GenericMachineType_NULL;
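For illustration only (not part of the patch): with the chip-based math removed, home-node lookups reduce to the calls below; "addr" and its value are hypothetical.

    Address addr;
    addr.setAddress(0x4000);
    NodeID dir_node = map_Address_to_DirectoryNode(addr);  // delegates to DirectoryMemory::mapAddressToDirectoryVersion
    MachineID dir   = map_Address_to_Directory(addr);
    MachineID dma   = map_Address_to_DMA(addr);            // always {MachineType_DMA, 0} in this patch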
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc
index f16a02f26..6a12af385 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.cc
@@ -85,7 +85,7 @@ void profile_miss(const CacheMsg& msg, NodeID id)
ASSERT (!Protocol::m_CMP);
g_system_ptr->getProfiler()->addAddressTraceSample(msg, id);
- g_system_ptr->getProfiler()->profileConflictingRequests(msg.getAddress());
+ g_system_ptr->getProfiler()->profileConflictingRequests(msg.getLineAddress());
g_system_ptr->getProfiler()->addSecondaryStatSample(msg.getType(),
msg.getAccessMode(), msg.getSize(), msg.getPrefetch(), id);
@@ -93,9 +93,6 @@ void profile_miss(const CacheMsg& msg, NodeID id)
void profile_L1Cache_miss(const CacheMsg& msg, NodeID id)
{
- // only called by protocols assuming non-zero cycle hits
- ASSERT (REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH);
-
g_system_ptr->getProfiler()->addPrimaryStatSample(msg, id);
}
@@ -139,25 +136,5 @@ void profileGetS(const Address& datablock, const Address& PC, const Set& owner,
g_system_ptr->getProfiler()->getAddressProfiler()->profileGetS(datablock, PC, owner, sharers, requestor);
}
-void profileOverflow(const Address & addr, MachineID mach)
-{
-#if 0
- if(mach.type == MACHINETYPE_L1CACHE_ENUM){
- // for L1 overflows
- int proc_num = L1CacheMachIDToProcessorNum(mach);
- int chip_num = proc_num/RubyConfig::numberOfProcsPerChip();
- assert(0);
- // g_system_ptr->getChip(chip_num)->m_L1Cache_xact_mgr_vec[proc_num]->profileOverflow(addr, true);
- }
- else if(mach.type == MACHINETYPE_L2CACHE_ENUM){
- // for L2 overflows
- int chip_num = L2CacheMachIDToChipID(mach);
- for(int p=0; p < RubyConfig::numberOfProcessors(); ++p){
- assert(0);
- // g_system_ptr->getChip(chip_num)->m_L1Cache_xact_mgr_vec[p]->profileOverflow(addr, false);
- }
- }
-#endif
-}
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
index d0fc0c4a5..d8692951e 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
@@ -54,6 +54,7 @@
#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/protocol/PrefetchBit.hh"
+#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
@@ -67,7 +68,7 @@ extern inline int random(int n)
extern inline bool multicast_retry()
{
- if (RANDOMIZATION) {
+ if (RubySystem::getRandomization()) {
return (random() & 0x1);
} else {
return true;
@@ -111,15 +112,18 @@ extern inline int MessageSizeTypeToInt(MessageSizeType size_type)
return MessageSizeType_to_int(size_type);
}
+/*
extern inline int numberOfNodes()
{
return RubyConfig::numberOfChips();
}
-
+*/
+/*
extern inline int numberOfL1CachePerChip()
{
- return RubyConfig::numberOfL1CachePerChip();
+ return RubyConfig::getNumberOfCachesPerLevelPerChip(1,0);
}
+*/
extern inline bool long_enough_ago(Time event)
{
@@ -149,7 +153,7 @@ extern inline Time getTimeMinusTime(Time t1, Time t2)
extern inline Time getPreviousDelayedCycles(Time t1, Time t2)
{
- if (RANDOMIZATION) { // when randomizing delayed
+ if (RubySystem::getRandomization()) { // when randomizing delayed
return 0;
} else {
return getTimeMinusTime(t1, t2);
@@ -167,39 +171,40 @@ extern inline Time time_to_int(Time time)
return time;
}
-
+/*
extern inline bool getFilteringEnabled()
{
- return g_FILTERING_ENABLED;
+ return RubyConfig::getFilteringEnabled();
}
+
extern inline int getRetryThreshold()
{
- return g_RETRY_THRESHOLD;
+ return RubyConfig::getRetryThreshold();
}
extern inline int getFixedTimeoutLatency()
{
- return g_FIXED_TIMEOUT_LATENCY;
+ return RubyConfig::getFixedTimeoutLatency();
}
extern inline int N_tokens()
{
// return N+1 to handle clean writeback
- return g_PROCS_PER_CHIP + 1;
+ return RubyConfig::getProcsPerChip() + 1;
// return 1;
}
extern inline bool distributedPersistentEnabled()
{
- return g_DISTRIBUTED_PERSISTENT_ENABLED;
+ return RubyConfig::getDistributedPersistentEnabled();
}
extern inline bool getDynamicTimeoutEnabled()
{
- return g_DYNAMIC_TIMEOUT_ENABLED;
+ return RubyConfig::getDynamicTimeoutEnabled();
}
-
+*/
// Appends an offset to an address
extern inline Address setOffset(Address addr, int offset)
{
diff --git a/src/mem/ruby/storebuffer/hfa.hh b/src/mem/ruby/storebuffer/hfa.hh
new file mode 100644
index 000000000..abcd96495
--- /dev/null
+++ b/src/mem/ruby/storebuffer/hfa.hh
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// this code was modified to fit into Rochs
+
+#ifndef _HFA_H_
+#define _HFA_H_
+
+using namespace std;
+
+/*
+ * Global include file for entire project.
+ * Should be included first in all ".cc" project files
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+#include "mem/ruby/common/Global.hh"
+#include <string>
+#include <map>
+#include <set>
+#include <list>
+#include <fstream>
+#include <iostream>
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h> // va_start(), va_end()
+#include <strings.h> // declaration of bzero()
+
+#include <sys/time.h> // gettimeofday() includes
+#include <errno.h>
+#include <unistd.h>
+
+/*------------------------------------------------------------------------*/
+/* Type Includes */
+/*------------------------------------------------------------------------*/
+
+#include "mem/ruby/storebuffer/hfatypes.hh"
+
+/*------------------------------------------------------------------------*/
+/* Forward class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+class wait_list_t;
+class waiter_t;
+class free_list_t;
+class pipestate_t;
+class pipepool_t;
+
+
+/** Maximum size of a load or store that may occur to/from the memory system.
+ * (in 64-bit quantities). Currently this is set to 8 * 64-bits = 64-bytes.
+ */
+const uint32 MEMOP_MAX_SIZE = 8;
+
+/** 64-bit int memory masks */
+#define MEM_BYTE_MASK 0x00000000000000ffULL
+#define MEM_HALF_MASK 0x000000000000ffffULL
+#define MEM_WORD_MASK 0x00000000ffffffffULL
+#define MEM_EXTD_MASK 0xffffffffffffffffULL
+#define MEM_QUAD_MASK 0xffffffffffffffffULL
+
+#define ISEQ_MASK 0x0000ffffffffffffULL
+
+/*------------------------------------------------------------------------*/
+/* Configuration Parameters */
+/*------------------------------------------------------------------------*/
+
+#define SIM_HALT assert(0);
+
+#include <assert.h>
+
+#endif /* _HFA_H_ */
+
+
diff --git a/src/mem/ruby/storebuffer/hfatypes.hh b/src/mem/ruby/storebuffer/hfatypes.hh
new file mode 100644
index 000000000..c4d0de2e6
--- /dev/null
+++ b/src/mem/ruby/storebuffer/hfatypes.hh
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HFATYPES_H_
+#define _HFATYPES_H_
+
+/*
+ * Global include file for entire project.
+ * Should be included first in all ".cc" project files
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+/*------------------------------------------------------------------------*/
+/* SimIcs Includes */
+/*------------------------------------------------------------------------*/
+
+/* import C functions */
+
+
+/*------------------------------------------------------------------------*/
+/* Forward class declaration(s) */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Macro declarations */
+/*------------------------------------------------------------------------*/
+
+// definitions of MAX / MIN (if needed)
+#ifndef MAX
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* Statistics tracking definition */
+#define STAT_INC(A) (A)++
+
+/*------------------------------------------------------------------------*/
+/* Enumerations */
+/*------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*/
+/* Project Includes */
+/*------------------------------------------------------------------------*/
+
+typedef unsigned char byte_t; /* byte - 8 bits */
+typedef unsigned short half_t; /* half - 16 bits */
+typedef unsigned int word_t; /* word - 32 bits */
+typedef uint64 tick_t; /* time - 64 bit */
+
+#endif /* _HFATYPES_H_ */
diff --git a/src/mem/ruby/storebuffer/interface.cc b/src/mem/ruby/storebuffer/interface.cc
new file mode 100644
index 000000000..1ee6ee3a0
--- /dev/null
+++ b/src/mem/ruby/storebuffer/interface.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/libruby.hh"
+#include "writebuffer.hh"
+#include <iostream>
+
+writebuffer_status_t handleStore (writebuffer_t * writebuffer, const RubyRequest & request) {
+ assert(request.type == RubyRequestType_ST);
+ if (writebuffer->writeBufferFull()){
+ return WB_FULL;
+ }
+ else if (writebuffer->writeBufferFlushing()) {
+ return WB_FLUSHING;
+ }
+ else {
+ writebuffer->addToWriteBuffer(request);
+ return WB_OK;
+ }
+}
+
+uint64_t handleLoad(writebuffer_t * writebuffer, const RubyRequest & request) {
+ assert(request.type == RubyRequestType_LD);
+ return writebuffer->handleLoad(request);
+}
+
+uint64_t handleAtomic(writebuffer_t * writebuffer, const RubyRequest & request) {
+ // flush the store buffer
+ writebuffer->flushWriteBuffer();
+ // let writebuffer issue atomic
+ //return writebuffer->issueAtomic(request);
+}
+
+void flushSTB(writebuffer_t * writebuffer) {
+  // with an in-order core a flushSTB request cannot arrive while a flush is already in progress;
+  // with an out-of-order core we have to check whether a flush is already underway
+ writebuffer->flushWriteBuffer();
+}
+
+void registerHitCallback(writebuffer_t * writebuffer, void (*hit_callback)(int64_t access_id)) {
+ writebuffer->registerHitCallback(hit_callback);
+}
diff --git a/src/mem/ruby/storebuffer/interface.hh b/src/mem/ruby/storebuffer/interface.hh
new file mode 100644
index 000000000..cbf010275
--- /dev/null
+++ b/src/mem/ruby/storebuffer/interface.hh
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef STB_H
+#define STB_H
+
+#include "mem/ruby/libruby.hh"
+#include "writebuffer.hh"
+#include <iostream>
+
+writebuffer_status_t handleStore (writebuffer_t * writebuffer, const RubyRequest & request);
+
+uint64_t handleLoad(writebuffer_t * writebuffer, const RubyRequest & request);
+
+uint64_t handleAtomic(writebuffer_t * writebuffer, const RubyRequest & request);
+
+void flushSTB(writebuffer_t * writebuffer);
+
+void registerHitCallback(writebuffer_t * writebuffer, void (*hit_callback)(int64_t access_id));
+
+#endif
diff --git a/src/mem/ruby/storebuffer/stb_interface.cc b/src/mem/ruby/storebuffer/stb_interface.cc
new file mode 100644
index 000000000..df280d9e1
--- /dev/null
+++ b/src/mem/ruby/storebuffer/stb_interface.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "mem/ruby/storebuffer/stb_interface.hh"
+
+StoreBuffer * createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size) {
+ StoreBuffer * stb = new StoreBuffer(id, block_bits, storebuffer_size);
+ return stb;
+}
+
+storebuffer_status_t handleStore (StoreBuffer * storebuffer, const RubyRequest & request) {
+ assert(request.type == RubyRequestType_ST);
+ if (storebuffer->storeBufferFull()){
+ return WB_FULL;
+ }
+ else if (storebuffer->storeBufferFlushing()) {
+ return WB_FLUSHING;
+ }
+ else {
+ storebuffer->addToStoreBuffer(request);
+ return WB_OK;
+ }
+}
+
+uint64_t handleLoad(StoreBuffer * storebuffer, const RubyRequest & request) {
+ assert(request.type == RubyRequestType_LD);
+ return storebuffer->handleLoad(request);
+}
+
+uint64_t handleAtomic(StoreBuffer * storebuffer, const RubyRequest & request) {
+ // flush the store buffer
+ storebuffer->flushStoreBuffer();
+ // let storebuffer issue atomic
+ //return storebuffer->issueAtomic(request);
+}
+
+void flushSTB(StoreBuffer * storebuffer) {
+  // with an in-order core a flushSTB request cannot arrive while a flush is already in progress;
+  // with an out-of-order core we have to check whether a flush is already underway
+ storebuffer->flushStoreBuffer();
+}
+
+void registerHitCallback(StoreBuffer * storebuffer, void (*hit_callback)(int64_t access_id)) {
+ storebuffer->registerHitCallback(hit_callback);
+}
+
+
diff --git a/src/mem/ruby/storebuffer/stb_interface.hh b/src/mem/ruby/storebuffer/stb_interface.hh
new file mode 100644
index 000000000..e1a026abc
--- /dev/null
+++ b/src/mem/ruby/storebuffer/stb_interface.hh
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/storebuffer/storebuffer.hh"
+#include <iostream>
+
+StoreBuffer * createNewSTB (uint32 id, uint32 block_bits, int storebuffer_size);
+
+storebuffer_status_t handleStore (StoreBuffer * storebuffer, const RubyRequest & request);
+
+uint64_t handleLoad(StoreBuffer * storebuffer, const RubyRequest & request);
+
+uint64_t handleAtomic(StoreBuffer * storebuffer, const RubyRequest & request);
+
+void flushSTB(StoreBuffer * storebuffer);
+
+void registerHitCallback(StoreBuffer * storebuffer, void (*hit_callback)(int64_t access_id));
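For illustration only (not part of the patch): a usage sketch of the stb_interface entry points, with the RubyRequest constructor arguments following the call shown in TraceRecord.cc above; "my_hit_callback", the address, and the sizes are hypothetical.

    void my_hit_callback(int64_t access_id) { /* retire the completed access */ }

    void stb_example() {
      StoreBuffer * stb = createNewSTB(0 /* id */, 6 /* block_bits: 64-byte lines */, 16 /* entries */);
      registerHitCallback(stb, my_hit_callback);
      RubyRequest st(0x1000, NULL, 64, 0, RubyRequestType_ST, RubyAccessMode_User);
      if (handleStore(stb, st) == WB_FULL) {
        flushSTB(stb);   // drain the buffer before retrying the store
      }
    }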
diff --git a/src/mem/ruby/storebuffer/storebuffer.cc b/src/mem/ruby/storebuffer/storebuffer.cc
new file mode 100644
index 000000000..6d6e4b228
--- /dev/null
+++ b/src/mem/ruby/storebuffer/storebuffer.cc
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*------------------------------------------------------------------------*/
+/* Includes */
+/*------------------------------------------------------------------------*/
+
+#include "mem/ruby/storebuffer/hfa.hh"
+#include "mem/ruby/storebuffer/storebuffer.hh"
+#include <map>
+#include "mem/ruby/common/Global.hh"
+#include "TsoChecker.hh"
+
+#define SYSTEM_EXIT ASSERT(0)
+
+
+// global map of request id_s to map them back to storebuffer pointers
+map <uint64_t, StoreBuffer *> request_map;
+
+Tso::TsoChecker * g_tsoChecker;
+
+void hit(int64_t id) {
+ if (request_map.find(id) == request_map.end()) {
+ ERROR_OUT("Request ID not found in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ }
+ else {
+ request_map[id]->complete(id);
+ request_map.erase(id);
+ }
+}
+
+
+//*****************************************************************************************
+StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size) {
+#ifdef TSO_CHECK
+ if (id == 0) {
+ g_tsoChecker = new Tso::TsoChecker();
+ g_tsoChecker->init(64);
+ }
+#endif
+ iseq = 0;
+ tso_iseq = 0;
+ char name [] = "Sequencer_";
+  char port_name [32];  // "Sequencer_" plus the decimal id and a terminating NUL
+ sprintf(port_name, "%s%d", name, id);
+ m_port = libruby_get_port(port_name, hit);
+ m_hit_callback = NULL;
+ ASSERT(storebuffer_size >= 0);
+ m_storebuffer_size = storebuffer_size;
+ m_id = id;
+ m_block_size = 1 << block_bits;
+ m_block_mask = ~(m_block_size - 1);
+ m_buffer_size = 0;
+ m_use_storebuffer = false;
+ m_storebuffer_full = false;
+ m_storebuffer_flushing = false;
+ m_stalled_issue = true;
+ if(m_storebuffer_size > 0){
+ m_use_storebuffer = true;
+ }
+
+ #ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",m_use_storebuffer);
+ #endif
+}
+
+//******************************************************************************************
+StoreBuffer::~StoreBuffer(){
+#ifdef TSO_CHECK
+ if (m_id == 0) {
+ delete g_tsoChecker;
+ }
+#endif
+}
+
+//*****************************************************************************************************
+void StoreBuffer::registerHitCallback(void (*hit_callback)(int64_t request_id)) {
+ assert(m_hit_callback == NULL); // can't assign hit_callback twice
+ m_hit_callback = hit_callback;
+}
+
+
+//*****************************************************************************************************
+void StoreBuffer::addToStoreBuffer(struct RubyRequest request){
+ if(m_use_storebuffer){
+ #ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+ #endif
+
+ #ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\t INSERTING new request\n");
+ #endif
+
+
+ buffer.push_front(SBEntry(request, NULL));
+
+ m_buffer_size++;
+
+ if (m_buffer_size >= m_storebuffer_size) {
+ m_storebuffer_full = true;
+ }
+ else if (m_stalled_issue) {
+ m_stalled_issue = false;
+ issueNextStore();
+ }
+
+ iseq++;
+
+ #ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
+ DEBUG_OUT("\n");
+ #endif
+ } //end if(m_use_storebuffer)
+ else {
+ // make request to libruby
+ uint64_t id = libruby_issue_request(m_port, request);
+ if (request_map.find(id) != request_map.end()) {
+ ERROR_OUT("Request ID is already in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ }
+ else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request));
+ }
+ }
+}
+
+
+//*****************************************************************************************************
+// Return value of -2 indicates that the load request was satisfied by the store buffer
+// Return value of -3 indicates a partial match, so the load has to retry until NO_MATCH
+// Alternatively we could satisfy the partial match, but TSO checking gets complicated and introduces more races
+//*****************************************************************************************************
+int64_t StoreBuffer::handleLoad(struct RubyRequest request) {
+ if (m_use_storebuffer) {
+ load_match match = checkForLoadHit(request);
+ if (match == FULL_MATCH) {
+ // fill data
+ returnMatchedData(request);
+ iseq++;
+ return -2;
+ }
+ else if (match == NO_MATCH) {
+ // make request to libruby and return the id
+ uint64_t id = libruby_issue_request(m_port, request);
+ if (request_map.find(id) != request_map.end()) {
+ ERROR_OUT("Request ID is already in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ }
+ else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request));
+ }
+ iseq++;
+ return id;
+ }
+ else { // partial match
+ return -3;
+ }
+ }
+ else {
+ // make a request to ruby
+ return libruby_issue_request(m_port, request);
+ }
+}
+
+
+//*****************************************************************************************************
+// This function will fill the data array if any match is found
+//*****************************************************************************************************
+load_match StoreBuffer::checkForLoadHit(struct RubyRequest request) {
+ if (m_use_storebuffer) {
+ physical_address_t physical_address = request.paddr;
+ int len = request.len;
+
+ uint8_t * data = new uint8_t[64];
+ memset(data, 0, 64);
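+    // Mark every byte the load needs; matching store-buffer entries clear
+    // their bytes below, so any byte still marked at the end was not
+    // supplied by a buffered store.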
+    for (int i = physical_address%64; i < physical_address%64 + len; i++) {
+ data[i] = 1;
+ }
+
+ bool found = false;
+ physical_address_t lineaddr = physical_address & m_block_mask;
+
+ // iterate over the buffer looking for hits
+ for (deque<struct SBEntry>::iterator it = buffer.begin(); it != buffer.end(); it++) {
+ if ((it->m_request.paddr & m_block_mask) == lineaddr) {
+ found = true;
+        for (int i = it->m_request.paddr%64; i < it->m_request.paddr%64 + it->m_request.len; i++) {
+ data[i] = 0;
+ }
+ }
+ }
+
+ // if any matching entry is found, determine if all the requested bytes have been matched
+ if (found) {
+ ASSERT(m_buffer_size > 0);
+ int unmatched_bytes = 0;
+      for (int i = physical_address%64; i < physical_address%64 + len; i++) {
+ unmatched_bytes = unmatched_bytes + data[i];
+ }
+ if (unmatched_bytes == 0) {
+        delete [] data;
+ return FULL_MATCH;
+ }
+ else {
+        delete [] data;
+ return PARTIAL_MATCH;
+ }
+ }
+ else {
+      delete [] data;
+ return NO_MATCH;
+ }
+ } // end of if (m_use_storebuffer)
+ else {
+ // this function should never be called if we are not using a store buffer
+ ERROR_OUT("checkForLoadHit called while write buffer is not in use");
+ ASSERT(0);
+ }
+}
+
+
+//***************************************************************************************************
+void StoreBuffer::returnMatchedData(struct RubyRequest request) {
+ if (m_use_storebuffer) {
+
+ uint8_t * data = new uint8_t[64];
+ memset(data, 0, 64);
+ uint8_t * written = new uint8_t[64];
+ memset(written, 0, 64);
+
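+    // Merge bytes from all buffered stores to this line.  buffer.begin() is
+    // the most recently inserted store (entries are pushed onto the front),
+    // so written[] keeps newer data from being overwritten by older stores.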
+ physical_address_t physical_address = request.paddr;
+ int len = request.len;
+
+ ASSERT(checkForLoadHit(request) != NO_MATCH);
+ physical_address_t lineaddr = physical_address & m_block_mask;
+ bool found = false;
+ Tso::TsoCheckerCmd * cmd;
+ deque<struct SBEntry>::iterator satisfying_store;
+ for (deque<struct SBEntry>::iterator it = buffer.begin(); it != buffer.end(); it++) {
+ if ((it->m_request.paddr & m_block_mask) == lineaddr) {
+ if (!found) {
+ found = true;
+#ifdef TSO_CHECK
+ satisfying_store = it;
+ cmd = new Tso::TsoCheckerCmd(m_id, // this thread id
+ iseq, // instruction sequence
+                                   ITYPE_LOAD, // is a load
+ MEM_LOAD_DATA, // commit
+ request.paddr, // the address
+ NULL, // and data
+ request.len, // and len
+ DSRC_STB, // shouldn't matter
+                                   libruby_get_time(), // macc
+ 0, // gobs
+ 0);
+#endif
+ }
+ uint8_t * dataPtr = it->m_request.data;
+ int offset = it->m_request.paddr%64;
+        for (int i = offset; i < offset + it->m_request.len; i++) {
+ if (!written[i]) { // don't overwrite data with earlier data
+ data[i] = dataPtr[i-offset];
+ written[i] = 1;
+ }
+ }
+ }
+ }
+
+ int i = physical_address%64;
+ for (int j = 0; (i < physical_address%64 + len) && (j < len); i++, j++) {
+ if (written[i]) {
+ request.data[j] = data[i];
+ }
+ }
+
+#ifdef TSO_CHECK
+ uint64_t tso_data = 0;
+ memcpy(&tso_data, request.data, request.len);
+ cmd->setData(tso_data);
+
+ Tso::TsoCheckerCmd * adjust_cmd = satisfying_store->m_next_ptr;
+ if (adjust_cmd == NULL) {
+    satisfying_store->m_next_ptr = cmd;
+ }
+ else {
+ while (adjust_cmd->getNext() != NULL) {
+ adjust_cmd = adjust_cmd->getNext();
+ }
+ adjust_cmd->setNext(cmd);
+ }
+#endif
+
+    delete [] data;
+    delete [] written;
+ }
+ else {
+ ERROR_OUT("returnMatchedData called while write buffer is not in use");
+ ASSERT(0);
+ }
+}
+
+
+//******************************************************************************************
+void StoreBuffer::flushStoreBuffer(){
+ if (m_use_storebuffer) {
+ #ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+ #endif
+
+ if(m_buffer_size > 0) {
+ m_storebuffer_flushing = true; // indicate that we are flushing
+ }
+ else {
+ m_storebuffer_flushing = false;
+ return;
+ }
+ }
+ else {
+ // do nothing
+ return;
+ }
+}
+
+//****************************************************************************************
+void StoreBuffer::issueNextStore() {
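+  // Stores issue oldest-first: new entries are pushed onto the front of the
+  // deque, so the back holds the oldest store still waiting to be issued.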
+ SBEntry request = buffer.back();
+ uint64_t id = libruby_issue_request(m_port, request.m_request);
+ if (request_map.find(id) != request_map.end()) {
+ assert(0);
+ }
+ else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request.m_request));
+ }
+}
+
+//****************************************************************************************
+void StoreBuffer::complete(uint64_t id) {
+ if (m_use_storebuffer) {
+ ASSERT(outstanding_requests.find(id) != outstanding_requests.end());
+ physical_address_t physical_address = outstanding_requests.find(id)->second.paddr;
+ RubyRequestType type = outstanding_requests.find(id)->second.type;
+ int len = outstanding_requests.find(id)->second.len;
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: complete BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+#endif
+
+ if (type == RubyRequestType_ST) {
+ physical_address_t lineaddr = physical_address & m_block_mask;
+
+ //Note fastpath hits are handled like regular requests - they must remove the WB entry!
+ if ( lineaddr != physical_address ) {
+ ERROR_OUT("error: StoreBuffer: ruby returns pa 0x%0llx which is not a cache line: 0x%0llx\n", physical_address, lineaddr );
+ }
+
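+      // Stores complete in the order they were issued, so the completing
+      // store should be the oldest entry, i.e. the back of the deque
+      // (verified by the address/type check below).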
+ SBEntry from_buffer = buffer.back();
+ if (((from_buffer.m_request.paddr & m_block_mask) == lineaddr) && (from_buffer.m_request.type == type)) {
+ buffer.pop_back();
+ m_buffer_size--;
+ ASSERT(m_buffer_size >= 0);
+
+#ifdef TSO_CHECK
+ uint64_t data = 0;
+ memcpy(&data, from_buffer.m_request.data, 4);
+
+ cerr << m_id << " INSERTING STORE" << endl << flush;
+ // add to the tsoChecker
+ g_tsoChecker->input(m_id, // this thread id
+ (id & ISEQ_MASK), // instruction sequence
+ ITYPE_STORE, // is a store
+ MEM_STORE_COMMIT, // commit
+ physical_address, // the address
+ data, // and data
+ len, // and len
+ DSRC_STB, // shouldn't matter
+ libruby_get_time(), // macc
+ libruby_get_time(), // gobs
+ libruby_get_time()); // time
+ tso_iseq++;
+
+ // also add the loads that are satisfied by this store
+ if (from_buffer.m_next_ptr != NULL) {
+ from_buffer.m_next_ptr->setGobs(libruby_get_time());
+ g_tsoChecker->input(*(from_buffer.m_next_ptr));
+ cerr << m_id << " INSERTING LOAD for STORE: " << from_buffer.m_next_ptr->getIseq() << endl << flush;
+ tso_iseq++;
+ Tso::TsoCheckerCmd * to_input = from_buffer.m_next_ptr->getNext();
+ while (to_input != NULL) {
+ if (to_input->getGobs() == 0) {
+ to_input->setGobs(libruby_get_time());
+ }
+ cerr << m_id << " INSERTING LOAD iseq for STORE: " << to_input->getIseq() << endl << flush;
+ g_tsoChecker->input(*to_input);
+ tso_iseq++;
+ to_input = to_input->getNext();
+ }
+ }
+#endif
+ // schedule the next request
+ if (m_buffer_size > 0) {
+ issueNextStore();
+ }
+ else if (m_buffer_size == 0) {
+ m_storebuffer_flushing = false;
+ m_stalled_issue = true;
+ }
+
+ m_storebuffer_full = false;
+
+ }
+ else {
+ ERROR_OUT("[%d] error: StoreBuffer: at complete, address 0x%0llx not found.\n", m_id, lineaddr);
+ ERROR_OUT("StoreBuffer:: complete FAILS\n");
+ ASSERT(0);
+ }
+
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
+ DEBUG_OUT("\n");
+#endif
+ } // end if (type == ST)
+ else if (type == RubyRequestType_LD) {
+#ifdef TSO_CHECK
+ RubyRequest request = outstanding_requests.find(id)->second;
+ uint64_t data = 0;
+ memcpy(&data, request.data, request.len);
+
+ // add to the tsoChecker if in order, otherwise, find a place to put ourselves
+ if ((id & ISEQ_MASK) == tso_iseq) {
+ tso_iseq++;
+ cerr << m_id << " INSERTING LOAD" << endl << flush;
+ g_tsoChecker->input(m_id, // this thread id
+ (id & ISEQ_MASK), // instruction sequence
+                          ITYPE_LOAD, // is a load
+ MEM_LOAD_DATA, // commit
+ request.paddr, // the address
+ data, // and data
+ request.len, // and len
+ DSRC_L2_MEMORY, // shouldn't matter DSRC_L1
+                          libruby_get_time(), // macc
+ libruby_get_time(), // macc
+ libruby_get_time()); // time
+ }
+ else {
+ Tso::TsoCheckerCmd * cmd;
+ cmd = new Tso::TsoCheckerCmd(m_id, // this thread id
+ (id & ISEQ_MASK), // instruction sequence
+                                   ITYPE_LOAD, // is a load
+ MEM_LOAD_DATA, // commit
+ request.paddr, // the address
+ data, // and data
+ request.len, // and len
+ DSRC_L2_MEMORY, // shouldn't matter DSRC_L1
+                                   libruby_get_time(), // macc
+ libruby_get_time(), // macc
+ libruby_get_time()); // time
+ insertTsoLL(cmd);
+ }
+#endif
+ m_hit_callback(id);
+ }
+
+ // LD, ST or FETCH hit callback
+ outstanding_requests.erase(id);
+
+ } // end if(m_use_storebuffer)
+ else {
+ m_hit_callback(id);
+ }
+}
+
+
+void StoreBuffer::insertTsoLL(Tso::TsoCheckerCmd * cmd) {
+ uint64_t count = cmd->getIseq();
+ Tso::TsoCheckerCmd * current = NULL;
+ Tso::TsoCheckerCmd * previous = NULL;
+ deque<struct SBEntry>::reverse_iterator iter;
+ bool found = false;
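+  // Keep the queued-load commands ordered by instruction sequence number:
+  // scan the store buffer from oldest (rbegin) to newest for the first
+  // queued command with a larger iseq and link the new command in just
+  // before it; if none is found, attach it to the newest entry.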
+ for (iter = buffer.rbegin(); iter != buffer.rend(); ++ iter) {
+ if (iter->m_next_ptr != NULL) {
+      current = iter->m_next_ptr->getNext(); // initialize both to the beginning of the linked list
+ previous = current;
+ while (current != NULL) {
+ if (current->getIseq() > count) {
+ found = true;
+ break;
+ }
+ previous = current;
+ current = current->getNext();
+ }
+ }
+ // break out if found a match, iterator should still point to the right SBEntry
+ if (found) {
+ break;
+ }
+ }
+
+ // will insert at the end if not found
+ if (!found) {
+ buffer.front().m_next_ptr = cmd;
+ }
+ else if (current == previous) {
+ cerr << "INSERTING " << count << " BEFORE: " << iter->m_next_ptr->getIseq();
+ Tso::TsoCheckerCmd * temp = iter->m_next_ptr;
+ iter->m_next_ptr = cmd;
+ cmd->setNext(temp);
+ }
+ else {
+ cerr << "INSERTING " << count << " BETWEEN: " << previous->getIseq() << " AND " << current->getIseq();
+ cmd->setNext(current);
+ previous->setNext(cmd);
+ }
+}
+
+
+//***************************************************************************************************
+void StoreBuffer::print( void )
+{
+  DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n", m_id, m_buffer_size, (int) outstanding_requests.size());
+
+ if(m_use_storebuffer){
+ }
+ else{
+ DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
+ }
+}
+
+
+
+
diff --git a/src/mem/ruby/storebuffer/storebuffer.hh b/src/mem/ruby/storebuffer/storebuffer.hh
new file mode 100644
index 000000000..a5cf99f07
--- /dev/null
+++ b/src/mem/ruby/storebuffer/storebuffer.hh
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _STOREBUFFER_H_
+#define _STOREBUFFER_H_
+
+#include <map>
+#include <deque>
+#include "mem/ruby/storebuffer/hfa.hh"
+#include "mem/ruby/libruby.hh"
+#include "TsoCheckerCmd.hh"
+
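+// When TSO_CHECK is defined, every completed load and store is also fed to
+// the TSO consistency checker (g_tsoChecker) in storebuffer.cc.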
+#define TSO_CHECK
+/**
+ * Status of a write buffer access: the buffer may be full, flushing, or
+ * able to enqueue the store request (WB_OK)
+ */
+enum storebuffer_status_t { WB_FULL, WB_OK, WB_FLUSHING };
+
+/**
+ * Status of a load match
+ */
+enum load_match { NO_MATCH, PARTIAL_MATCH, FULL_MATCH };
+
+struct SBEntry {
+ struct RubyRequest m_request;
+ Tso::TsoCheckerCmd * m_next_ptr;
+ SBEntry(struct RubyRequest request, void * ptr) { m_request = request; m_next_ptr = (Tso::TsoCheckerCmd*) ptr; }
+};
+
+class StoreBuffer {
+ public:
+ ///Constructor
+  /// The size of the write buffer is given by the storebuffer_size argument; a size of 0 disables it
+ StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size);
+
+ /// Register hitcallback back to CPU
+ void registerHitCallback(void (*hit_callback)(int64_t request_id));
+
+ /// Destructor
+ ~StoreBuffer();
+
+ ///Adds a store entry to the write buffer
+ void addToStoreBuffer(struct RubyRequest request);
+
+ ///Flushes the entire write buffer
+ void flushStoreBuffer();
+
+ ///A pseq object calls this when Ruby completes our request
+ void complete(uint64_t);
+
+  /// Returns the request ID; -2 means the load hit in the write buffer, -3 means a partial match (retry), anything else is an ID to wait on
+ int64_t handleLoad(struct RubyRequest request);
+
+  /// Used by loads to check whether the request hits any entry in the WB
+ load_match checkForLoadHit(struct RubyRequest request);
+
+ /// Used to fill the load in case of FULL_MATCH
+ void returnMatchedData(struct RubyRequest request);
+
+ /// Issue next store in line
+ void issueNextStore();
+
+ /// prints out the contents of the Write Buffer
+ void print();
+
+ /// if load completes before store, insert correctly to be issued to TSOChecker
+ void insertTsoLL(Tso::TsoCheckerCmd * cmd);
+
+ /// Returns flag indicating whether we are using the write buffer
+ bool useStoreBuffer() { return m_use_storebuffer; }
+
+ bool storeBufferFull() { return m_storebuffer_full; }
+
+ bool storeBufferFlushing() { return m_storebuffer_flushing; }
+
+ private:
+ /// id of this write buffer (one per sequencer object)
+ uint32 m_id;
+
+ /// number of bytes in cacheline
+ uint32 m_block_size;
+
+ /// the size of the write buffer
+ uint32 m_storebuffer_size;
+
+ /// mask to strip off non-cache line bits
+ pa_t m_block_mask;
+
+ /// list of store requests in the write buffer
+ deque <struct SBEntry> buffer;
+
+ /// the current length of the write buffer
+ uint32 m_buffer_size;
+
+ /// whether we want to simulate the write buffer or not:
+ bool m_use_storebuffer;
+
+ /// indicates whether the write buffer is full or not
+ bool m_storebuffer_full;
+
+ /// indicates that we are currently flushing the write buffer
+ bool m_storebuffer_flushing;
+
+ /// indicates that automatic issue is stalled and the next store to be added should issue itself
+ bool m_stalled_issue;
+
+ /// RubyPort to make requests to
+ RubyPortHandle m_port;
+
+ /// HitCallback to CPU
+ void (*m_hit_callback)(int64_t);
+
+ /// Map the request id to rubyrequest
+ map<uint64_t, struct RubyRequest> outstanding_requests;
+
+ /// current instruction counter
+ uint64_t iseq;
+
+
+ /// input into tso counter
+ uint64_t tso_iseq;
+};
+
+#endif
diff --git a/src/mem/ruby/system/AbstractMemOrCache.hh b/src/mem/ruby/system/AbstractMemOrCache.hh
index e56e1f505..641c117de 100644
--- a/src/mem/ruby/system/AbstractMemOrCache.hh
+++ b/src/mem/ruby/system/AbstractMemOrCache.hh
@@ -11,7 +11,6 @@
#define ABSTRACT_MEM_OR_CACHE_H
#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/ruby/config/RubyConfig.hh"
#include "mem/ruby/common/Address.hh"
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
index 22799ab13..4d46ac908 100644
--- a/src/mem/ruby/system/CacheMemory.hh
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -38,13 +38,10 @@
#ifndef CACHEMEMORY_H
#define CACHEMEMORY_H
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
-
-//dsm: PRUNED
-//#include "mem/ruby/recorder/CacheRecorder.hh"
+#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/DataBlock.hh"
@@ -52,18 +49,25 @@
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/PseudoLRUPolicy.hh"
#include "mem/ruby/system/LRUPolicy.hh"
+#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/slicc_interface/AbstractController.hh"
#include <vector>
-template<class ENTRY>
class CacheMemory {
public:
// Constructors
- CacheMemory(AbstractChip* chip_ptr, int numSetBits, int cacheAssoc, const MachineType machType, const string& description);
+ CacheMemory(const string & name);
+ void init(const vector<string> & argv);
// Destructor
~CacheMemory();
+ // factory
+ // static CacheMemory* createCache(int level, int num, char split_type, AbstractCacheEntry* (*entry_factory)());
+ // static CacheMemory* getCache(int cache_id);
+
// Public Methods
void printConfig(ostream& out);
@@ -82,7 +86,7 @@ public:
bool cacheAvail(const Address& address) const;
// find an unused entry and sets the tag appropriate for the address
- void allocate(const Address& address);
+ void allocate(const Address& address, AbstractCacheEntry* new_entry);
// Explicitly free up this address
void deallocate(const Address& address);
@@ -91,16 +95,18 @@ public:
Address cacheProbe(const Address& address) const;
// looks an address up in the cache
- ENTRY& lookup(const Address& address);
- const ENTRY& lookup(const Address& address) const;
+ AbstractCacheEntry& lookup(const Address& address);
+ const AbstractCacheEntry& lookup(const Address& address) const;
// Get/Set permission of cache block
AccessPermission getPermission(const Address& address) const;
void changePermission(const Address& address, AccessPermission new_perm);
+ int getLatency() const { return m_latency; }
+
// Hook for checkpointing the contents of the cache
void recordCacheContents(CacheRecorder& tr) const;
- void setAsInstructionCache(bool is_icache) { m_is_instruction_cache = is_icache; }
+ void setAsInstructionCache(bool is_icache) { m_is_instruction_only_cache = is_icache; }
// Set this address to most recently used
void setMRU(const Address& address);
@@ -129,15 +135,18 @@ private:
CacheMemory(const CacheMemory& obj);
CacheMemory& operator=(const CacheMemory& obj);
+private:
+ const string m_cache_name;
+ AbstractController* m_controller;
+ int m_latency;
+
// Data Members (m_prefix)
- AbstractChip* m_chip_ptr;
- MachineType m_machType;
- string m_description;
- bool m_is_instruction_cache;
+ bool m_is_instruction_only_cache;
+ bool m_is_data_only_cache;
// The first index is the # of cache lines.
// The second index is the the amount associativity.
- Vector<Vector<ENTRY> > m_cache;
+ Vector<Vector<AbstractCacheEntry*> > m_cache;
AbstractReplacementPolicy *m_replacementPolicy_ptr;
@@ -145,18 +154,55 @@ private:
int m_cache_num_set_bits;
int m_cache_assoc;
- bool is_locked; // for LL/SC
+ static Vector< CacheMemory* > m_all_caches;
};
+/*
+inline
+CacheMemory* CacheMemory::getCache(int cache_id)
+{
+ assert(cache_id < RubyConfig::getNumberOfCaches());
+ if (m_all_caches[cache_id] == NULL) {
+ cerr << "ERROR: Tried to obtain CacheMemory that hasn't been created yet." << endl;
+ assert(0);
+ }
+ return m_all_caches[cache_id];
+}
+inline
+CacheMemory* CacheMemory::createCache(int level, int num, char split_type_c, AbstractCacheEntry* (*entry_factory)())
+{
+ string split_type;
+ switch(split_type_c) {
+ case 'i':
+ split_type = "instruction"; break;
+ case 'd':
+ split_type = "data"; break;
+ default:
+ split_type = "unified"; break;
+ }
+ int cache_id = RubyConfig::getCacheIDFromParams(level, num, split_type);
+ assert(cache_id < RubyConfig::getNumberOfCaches());
+ if (m_all_caches.size() == 0) {
+ m_all_caches.setSize(RubyConfig::getNumberOfCaches());
+ for (int i=0; i<m_all_caches.size(); i++)
+ m_all_caches[i] = NULL;
+ }
+
+ string type = RubyConfig::getCacheType(cache_id);
+ if ( type == "SetAssociativeCache" ) {
+ m_all_caches[cache_id] = new CacheMemory(cache_id, entry_factory);
+ }
+ return m_all_caches[cache_id];
+}
+*/
// Output operator declaration
//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj);
// ******************* Definitions *******************
// Output operator definition
-template<class ENTRY>
inline
-ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj)
+ostream& operator<<(ostream& out, const CacheMemory& obj)
{
obj.print(out);
out << flush;
@@ -166,112 +212,142 @@ ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj)
// ****************************************************************
-template<class ENTRY>
inline
-CacheMemory<ENTRY>::CacheMemory(AbstractChip* chip_ptr, int numSetBits,
- int cacheAssoc, const MachineType machType, const string& description)
+CacheMemory::CacheMemory(const string & name)
+ : m_cache_name(name)
+{
+}
+inline
+void CacheMemory::init(const vector<string> & argv)
{
- //cout << "CacheMemory constructor numThreads = " << numThreads << endl;
- m_chip_ptr = chip_ptr;
- m_machType = machType;
- m_description = MachineType_to_string(m_machType)+"_"+description;
- m_cache_num_set_bits = numSetBits;
- m_cache_num_sets = 1 << numSetBits;
- m_cache_assoc = cacheAssoc;
- m_is_instruction_cache = false;
+ int cache_size = 0;
+ string policy;
+
+ m_controller = NULL;
+ for (uint32 i=0; i<argv.size(); i+=2) {
+ if (argv[i] == "size_kb") {
+ cache_size = atoi(argv[i+1].c_str());
+ } else if (argv[i] == "latency") {
+ m_latency = atoi(argv[i+1].c_str());
+ } else if (argv[i] == "assoc") {
+ m_cache_assoc = atoi(argv[i+1].c_str());
+ } else if (argv[i] == "replacement_policy") {
+ policy = argv[i+1];
+ } else if (argv[i] == "controller") {
+ m_controller = RubySystem::getController(argv[i+1]);
+ } else {
+ cerr << "WARNING: CacheMemory: Unknown configuration parameter: " << argv[i] << endl;
+ }
+ }
- m_cache.setSize(m_cache_num_sets);
- if(strcmp(g_REPLACEMENT_POLICY, "PSEDUO_LRU") == 0)
+ m_cache_num_sets = cache_size / m_cache_assoc;
+ m_cache_num_set_bits = log_int(m_cache_num_sets);
+
+ if(policy == "PSEUDO_LRU")
m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
- else if(strcmp(g_REPLACEMENT_POLICY, "LRU") == 0)
+ else if (policy == "LRU")
m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
else
assert(false);
+
+ m_cache.setSize(m_cache_num_sets);
for (int i = 0; i < m_cache_num_sets; i++) {
m_cache[i].setSize(m_cache_assoc);
for (int j = 0; j < m_cache_assoc; j++) {
- m_cache[i][j].m_Address.setAddress(0);
- m_cache[i][j].m_Permission = AccessPermission_NotPresent;
+ m_cache[i][j] = NULL;
}
}
+}
+/*
+inline
+CacheMemory::CacheMemory(int cache_id, AbstractCacheEntry* (*entry_factory)())
+{
+ string split_type;
+
+ m_cache_id = cache_id;
+ m_entry_factory = entry_factory;
+
+ m_cache_num_set_bits = RubyConfig::getNumberOfCacheSetBits(cache_id);
+ m_cache_num_sets = RubyConfig::getNumberOfCacheSets(cache_id);
+ m_cache_assoc = RubyConfig::getCacheAssoc(cache_id);
+ split_type = RubyConfig::getCacheSplitType(cache_id);
+ m_is_instruction_only_cache = m_is_data_only_cache = false;
+ if (split_type == "instruction")
+ m_is_instruction_only_cache = true;
+ else if (split_type == "data")
+ m_is_data_only_cache = true;
+ else
+ assert(split_type == "unified");
+ if(RubyConfig::getCacheReplacementPolicy(cache_id) == "PSEUDO_LRU")
+ m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else if(RubyConfig::getCacheReplacementPolicy(cache_id) == "LRU")
+ m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else
+ assert(false);
- // cout << "Before setting trans address list size" << endl;
- //create a trans address for each SMT thread
-// m_trans_address_list.setSize(numThreads);
-// for(ThreadID tid = 0; tid < numThreads; ++tid){
-// cout << "Setting list size for list " << tid << endl;
-// m_trans_address_list[tid].setSize(30);
-// }
- //cout << "CacheMemory constructor finished" << endl;
+ m_cache.setSize(m_cache_num_sets);
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ m_cache[i].setSize(m_cache_assoc);
+ for (int j = 0; j < m_cache_assoc; j++) {
+ m_cache[i][j] = m_entry_factory();
+ }
+ }
}
-
-template<class ENTRY>
+*/
inline
-CacheMemory<ENTRY>::~CacheMemory()
+CacheMemory::~CacheMemory()
{
if(m_replacementPolicy_ptr != NULL)
delete m_replacementPolicy_ptr;
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::printConfig(ostream& out)
+void CacheMemory::printConfig(ostream& out)
{
- out << "Cache config: " << m_description << endl;
+ out << "Cache config: " << m_cache_name << endl;
+ if (m_controller != NULL)
+ out << " controller: " << m_controller->getName() << endl;
out << " cache_associativity: " << m_cache_assoc << endl;
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
const int cache_num_sets = 1 << m_cache_num_set_bits;
out << " num_cache_sets: " << cache_num_sets << endl;
- out << " cache_set_size_bytes: " << cache_num_sets * RubyConfig::dataBlockBytes() << endl;
+ out << " cache_set_size_bytes: " << cache_num_sets * RubySystem::getBlockSizeBytes() << endl;
out << " cache_set_size_Kbytes: "
- << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
+ << double(cache_num_sets * RubySystem::getBlockSizeBytes()) / (1<<10) << endl;
out << " cache_set_size_Mbytes: "
- << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
+ << double(cache_num_sets * RubySystem::getBlockSizeBytes()) / (1<<20) << endl;
out << " cache_size_bytes: "
- << cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc << endl;
+ << cache_num_sets * RubySystem::getBlockSizeBytes() * m_cache_assoc << endl;
out << " cache_size_Kbytes: "
- << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<10) << endl;
+ << double(cache_num_sets * RubySystem::getBlockSizeBytes() * m_cache_assoc) / (1<<10) << endl;
out << " cache_size_Mbytes: "
- << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<20) << endl;
+ << double(cache_num_sets * RubySystem::getBlockSizeBytes() * m_cache_assoc) / (1<<20) << endl;
}
// PRIVATE METHODS
// convert a Address to its location in the cache
-template<class ENTRY>
inline
-Index CacheMemory<ENTRY>::addressToCacheSet(const Address& address) const
+Index CacheMemory::addressToCacheSet(const Address& address) const
{
assert(address == line_address(address));
Index temp = -1;
- switch (m_machType) {
- case MACHINETYPE_L1CACHE_ENUM:
- temp = map_address_to_L1CacheSet(address, m_cache_num_set_bits);
- break;
- case MACHINETYPE_L2CACHE_ENUM:
- temp = map_address_to_L2CacheSet(address, m_cache_num_set_bits);
- break;
- default:
- ERROR_MSG("Don't recognize m_machType");
- }
- assert(temp < m_cache_num_sets);
- assert(temp >= 0);
- return temp;
+ return address.bitSelect(RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_cache_num_set_bits-1);
}
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
-template<class ENTRY>
inline
-int CacheMemory<ENTRY>::findTagInSet(Index cacheSet, const Address& tag) const
+int CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
{
assert(tag == line_address(tag));
// search the set for the tags
for (int i=0; i < m_cache_assoc; i++) {
- if ((m_cache[cacheSet][i].m_Address == tag) &&
- (m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent)) {
+ if ((m_cache[cacheSet][i] != NULL) &&
+ (m_cache[cacheSet][i]->m_Address == tag) &&
+ (m_cache[cacheSet][i]->m_Permission != AccessPermission_NotPresent)) {
return i;
}
}
@@ -280,39 +356,37 @@ int CacheMemory<ENTRY>::findTagInSet(Index cacheSet, const Address& tag) const
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
-template<class ENTRY>
inline
-int CacheMemory<ENTRY>::findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const
+int CacheMemory::findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const
{
assert(tag == line_address(tag));
// search the set for the tags
for (int i=0; i < m_cache_assoc; i++) {
- if (m_cache[cacheSet][i].m_Address == tag)
+ if (m_cache[cacheSet][i] != NULL && m_cache[cacheSet][i]->m_Address == tag)
return i;
}
return -1; // Not found
}
// PUBLIC METHODS
-template<class ENTRY>
inline
-bool CacheMemory<ENTRY>::tryCacheAccess(const Address& address,
- CacheRequestType type,
- DataBlock*& data_ptr)
+bool CacheMemory::tryCacheAccess(const Address& address,
+ CacheRequestType type,
+ DataBlock*& data_ptr)
{
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc != -1){ // Do we even have a tag match?
- ENTRY& entry = m_cache[cacheSet][loc];
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
- data_ptr = &(entry.getDataBlk());
+ data_ptr = &(entry->getDataBlk());
- if(entry.m_Permission == AccessPermission_Read_Write) {
+ if(entry->m_Permission == AccessPermission_Read_Write) {
return true;
}
- if ((entry.m_Permission == AccessPermission_Read_Only) &&
+ if ((entry->m_Permission == AccessPermission_Read_Only) &&
(type == CacheRequestType_LD || type == CacheRequestType_IFETCH)) {
return true;
}
@@ -322,31 +396,29 @@ bool CacheMemory<ENTRY>::tryCacheAccess(const Address& address,
return false;
}
-template<class ENTRY>
inline
-bool CacheMemory<ENTRY>::testCacheAccess(const Address& address,
- CacheRequestType type,
- DataBlock*& data_ptr)
+bool CacheMemory::testCacheAccess(const Address& address,
+ CacheRequestType type,
+ DataBlock*& data_ptr)
{
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc != -1){ // Do we even have a tag match?
- ENTRY& entry = m_cache[cacheSet][loc];
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
- data_ptr = &(entry.getDataBlk());
+ data_ptr = &(entry->getDataBlk());
- return (m_cache[cacheSet][loc].m_Permission != AccessPermission_NotPresent);
+ return (m_cache[cacheSet][loc]->m_Permission != AccessPermission_NotPresent);
}
data_ptr = NULL;
return false;
}
// tests to see if an address is present in the cache
-template<class ENTRY>
inline
-bool CacheMemory<ENTRY>::isTagPresent(const Address& address) const
+bool CacheMemory::isTagPresent(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
@@ -366,31 +438,29 @@ bool CacheMemory<ENTRY>::isTagPresent(const Address& address) const
// Returns true if there is:
// a) a tag match on this address or there is
// b) an unused line in the same cache "way"
-template<class ENTRY>
inline
-bool CacheMemory<ENTRY>::cacheAvail(const Address& address) const
+bool CacheMemory::cacheAvail(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
for (int i=0; i < m_cache_assoc; i++) {
- if (m_cache[cacheSet][i].m_Address == address) {
- // Already in the cache
- return true;
- }
-
- if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
- // We found an empty entry
+ AbstractCacheEntry* entry = m_cache[cacheSet][i];
+ if (entry != NULL) {
+ if (entry->m_Address == address || // Already in the cache
+ entry->m_Permission == AccessPermission_NotPresent) { // We found an empty entry
+ return true;
+ }
+ } else {
return true;
}
}
return false;
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::allocate(const Address& address)
+void CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
{
assert(address == line_address(address));
assert(!isTagPresent(address));
@@ -400,10 +470,11 @@ void CacheMemory<ENTRY>::allocate(const Address& address)
// Find the first open slot
Index cacheSet = addressToCacheSet(address);
for (int i=0; i < m_cache_assoc; i++) {
- if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
- m_cache[cacheSet][i] = ENTRY(); // Init entry
- m_cache[cacheSet][i].m_Address = address;
- m_cache[cacheSet][i].m_Permission = AccessPermission_Invalid;
+ if (m_cache[cacheSet][i] == NULL ||
+ m_cache[cacheSet][i]->m_Permission == AccessPermission_NotPresent) {
+ m_cache[cacheSet][i] = entry; // Init entry
+ m_cache[cacheSet][i]->m_Address = address;
+ m_cache[cacheSet][i]->m_Permission = AccessPermission_Invalid;
m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());
@@ -413,63 +484,62 @@ void CacheMemory<ENTRY>::allocate(const Address& address)
ERROR_MSG("Allocate didn't find an available entry");
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::deallocate(const Address& address)
+void CacheMemory::deallocate(const Address& address)
{
assert(address == line_address(address));
assert(isTagPresent(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
- lookup(address).m_Permission = AccessPermission_NotPresent;
+ Index cacheSet = addressToCacheSet(address);
+ int location = findTagInSet(cacheSet, address);
+ if (location != -1){
+ delete m_cache[cacheSet][location];
+ m_cache[cacheSet][location] = NULL;
+ }
}
// Returns with the physical address of the conflicting cache line
-template<class ENTRY>
inline
-Address CacheMemory<ENTRY>::cacheProbe(const Address& address) const
+Address CacheMemory::cacheProbe(const Address& address) const
{
assert(address == line_address(address));
assert(!cacheAvail(address));
Index cacheSet = addressToCacheSet(address);
- return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)].m_Address;
+ return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->m_Address;
}
// looks an address up in the cache
-template<class ENTRY>
inline
-ENTRY& CacheMemory<ENTRY>::lookup(const Address& address)
+AbstractCacheEntry& CacheMemory::lookup(const Address& address)
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- return m_cache[cacheSet][loc];
+ return *m_cache[cacheSet][loc];
}
// looks an address up in the cache
-template<class ENTRY>
inline
-const ENTRY& CacheMemory<ENTRY>::lookup(const Address& address) const
+const AbstractCacheEntry& CacheMemory::lookup(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- return m_cache[cacheSet][loc];
+ return *m_cache[cacheSet][loc];
}
-template<class ENTRY>
inline
-AccessPermission CacheMemory<ENTRY>::getPermission(const Address& address) const
+AccessPermission CacheMemory::getPermission(const Address& address) const
{
assert(address == line_address(address));
return lookup(address).m_Permission;
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+void CacheMemory::changePermission(const Address& address, AccessPermission new_perm)
{
assert(address == line_address(address));
lookup(address).m_Permission = new_perm;
@@ -477,9 +547,8 @@ void CacheMemory<ENTRY>::changePermission(const Address& address, AccessPermissi
}
// Sets the most recently used bit for a cache block
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::setMRU(const Address& address)
+void CacheMemory::setMRU(const Address& address)
{
Index cacheSet;
@@ -489,19 +558,15 @@ void CacheMemory<ENTRY>::setMRU(const Address& address)
g_eventQueue_ptr->getTime());
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::recordCacheContents(CacheRecorder& tr) const
+void CacheMemory::recordCacheContents(CacheRecorder& tr) const
{
-//dsm: Uses CacheRecorder, PRUNED
-assert(false);
-
-/* for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
- AccessPermission perm = m_cache[i][j].m_Permission;
+ AccessPermission perm = m_cache[i][j]->m_Permission;
CacheRequestType request_type = CacheRequestType_NULL;
if (perm == AccessPermission_Read_Only) {
- if (m_is_instruction_cache) {
+ if (m_is_instruction_only_cache) {
request_type = CacheRequestType_IFETCH;
} else {
request_type = CacheRequestType_LD;
@@ -511,55 +576,59 @@ assert(false);
}
if (request_type != CacheRequestType_NULL) {
- tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
- Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
+ // tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
+ // Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
}
}
- }*/
+ }
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::print(ostream& out) const
+void CacheMemory::print(ostream& out) const
{
- out << "Cache dump: " << m_description << endl;
+ out << "Cache dump: " << m_cache_name << endl;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
- out << " Index: " << i
- << " way: " << j
- << " entry: " << m_cache[i][j] << endl;
+ if (m_cache[i][j] != NULL) {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: " << *m_cache[i][j] << endl;
+ } else {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: NULL" << endl;
+ }
}
}
}
-template<class ENTRY>
inline
-void CacheMemory<ENTRY>::printData(ostream& out) const
+void CacheMemory::printData(ostream& out) const
{
out << "printData() not supported" << endl;
}
-template<class ENTRY>
-void CacheMemory<ENTRY>::getMemoryValue(const Address& addr, char* value,
- unsigned int size_in_bytes ){
- ENTRY entry = lookup(line_address(addr));
+inline
+void CacheMemory::getMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ AbstractCacheEntry& entry = lookup(line_address(addr));
unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
for(unsigned int i=0; i<size_in_bytes; ++i){
- value[i] = entry.m_DataBlk.getByte(i + startByte);
+ value[i] = entry.getDataBlk().getByte(i + startByte);
}
}
-template<class ENTRY>
-void CacheMemory<ENTRY>::setMemoryValue(const Address& addr, char* value,
- unsigned int size_in_bytes ){
- ENTRY& entry = lookup(line_address(addr));
+inline
+void CacheMemory::setMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ AbstractCacheEntry& entry = lookup(line_address(addr));
unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
assert(size_in_bytes > 0);
for(unsigned int i=0; i<size_in_bytes; ++i){
- entry.m_DataBlk.setByte(i + startByte, value[i]);
+ entry.getDataBlk().setByte(i + startByte, value[i]);
}
- entry = lookup(line_address(addr));
+ // entry = lookup(line_address(addr));
}
#endif //CACHEMEMORY_H
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
new file mode 100644
index 000000000..4aa092113
--- /dev/null
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -0,0 +1,130 @@
+
+#include "mem/ruby/system/DMASequencer.hh"
+#include "mem/ruby/buffers/MessageBuffer.hh"
+#include "mem/ruby/slicc_interface/AbstractController.hh"
+
+/* SLICC generated types */
+#include "mem/protocol/DMARequestMsg.hh"
+#include "mem/protocol/DMARequestType.hh"
+#include "mem/protocol/DMAResponseMsg.hh"
+#include "mem/ruby/system/System.hh"
+
+DMASequencer::DMASequencer(const string & name)
+ : RubyPort(name)
+{
+}
+
+void DMASequencer::init(const vector<string> & argv)
+{
+ m_version = -1;
+ m_controller = NULL;
+ for (size_t i=0;i<argv.size();i+=2) {
+ if (argv[i] == "controller")
+ m_controller = RubySystem::getController(argv[i+1]);
+ else if (argv[i] == "version")
+ m_version = atoi(argv[i+1].c_str());
+ }
+ assert(m_controller != NULL);
+ assert(m_version != -1);
+
+ m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+ m_is_busy = false;
+}
+
+int64_t DMASequencer::makeRequest(const RubyRequest & request)
+{
+ uint64_t paddr = request.paddr;
+ uint8_t* data = request.data;
+ int len = request.len;
+ bool write = false;
+ switch(request.type) {
+ case RubyRequestType_LD:
+ write = false;
+ break;
+ case RubyRequestType_ST:
+ write = true;
+ break;
+ case RubyRequestType_NULL:
+ case RubyRequestType_IFETCH:
+ case RubyRequestType_RMW:
+ assert(0);
+ }
+
+ assert(!m_is_busy);
+ m_is_busy = true;
+
+ active_request.start_paddr = paddr;
+ active_request.write = write;
+ active_request.data = data;
+ active_request.len = len;
+ active_request.bytes_completed = 0;
+ active_request.bytes_issued = 0;
+ active_request.id = makeUniqueRequestID();
+
+ DMARequestMsg msg;
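+  // Build and issue the first chunk of the DMA; any remaining bytes are sent
+  // block-by-block from issueNext() as the data/ack callbacks arrive.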
+ msg.getPhysicalAddress() = Address(paddr);
+ msg.getType() = write ? DMARequestType_WRITE : DMARequestType_READ;
+ msg.getOffset() = paddr & RubyConfig::dataBlockMask();
+ msg.getLen() = (msg.getOffset() + len) < RubySystem::getBlockSizeBytes() ?
+ (msg.getOffset() + len) :
+ RubySystem::getBlockSizeBytes() - msg.getOffset();
+ if (write) {
+ msg.getType() = DMARequestType_WRITE;
+ msg.getDataBlk().setData(data, 0, msg.getLen());
+ } else {
+ msg.getType() = DMARequestType_READ;
+ }
+ m_mandatory_q_ptr->enqueue(msg);
+ active_request.bytes_issued += msg.getLen();
+
+ return active_request.id;
+}
+
+void DMASequencer::issueNext()
+{
+ assert(m_is_busy == true);
+ active_request.bytes_completed = active_request.bytes_issued;
+ if (active_request.len == active_request.bytes_completed) {
+ m_hit_callback(active_request.id);
+ m_is_busy = false;
+ return;
+ }
+
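+  // Every chunk after the first starts on a cache-line boundary (checked by
+  // the assert below) and is at most one block long.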
+ DMARequestMsg msg;
+ msg.getPhysicalAddress() = Address(active_request.start_paddr + active_request.bytes_completed);
+ assert((msg.getPhysicalAddress().getAddress() & RubyConfig::dataBlockMask()) == 0);
+ msg.getOffset() = 0;
+ msg.getType() = active_request.write ? DMARequestType_WRITE : DMARequestType_READ;
+ msg.getLen() = active_request.len - active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
+ active_request.len - active_request.bytes_completed :
+ RubySystem::getBlockSizeBytes();
+ if (active_request.write) {
+ msg.getDataBlk().setData(&active_request.data[active_request.bytes_completed], 0, msg.getLen());
+ msg.getType() = DMARequestType_WRITE;
+ } else {
+ msg.getType() = DMARequestType_READ;
+ }
+ m_mandatory_q_ptr->enqueue(msg);
+ active_request.bytes_issued += msg.getLen();
+}
+
+void DMASequencer::dataCallback(const DataBlock & dblk)
+{
+ assert(m_is_busy == true);
+ int len = active_request.bytes_issued - active_request.bytes_completed;
+ int offset = 0;
+ if (active_request.bytes_completed == 0)
+ offset = active_request.start_paddr & RubyConfig::dataBlockMask();
+ memcpy(&active_request.data[active_request.bytes_completed], dblk.getData(offset, len), len);
+ issueNext();
+}
+
+void DMASequencer::ackCallback()
+{
+ issueNext();
+}
+
+void DMASequencer::printConfig(ostream & out)
+{
+
+}
diff --git a/src/mem/ruby/system/DMASequencer.hh b/src/mem/ruby/system/DMASequencer.hh
new file mode 100644
index 000000000..2665549e3
--- /dev/null
+++ b/src/mem/ruby/system/DMASequencer.hh
@@ -0,0 +1,49 @@
+
+#ifndef DMASEQUENCER_H
+#define DMASEQUENCER_H
+
+#include <ostream>
+#include "mem/ruby/common/DataBlock.hh"
+#include "mem/ruby/system/RubyPort.hh"
+
+struct DMARequest {
+ uint64_t start_paddr;
+ int len;
+ bool write;
+ int bytes_completed;
+ int bytes_issued;
+ uint8* data;
+ int64_t id;
+};
+
+class MessageBuffer;
+class AbstractController;
+
+class DMASequencer :public RubyPort {
+public:
+ DMASequencer(const string & name);
+ void init(const vector<string> & argv);
+ /* external interface */
+ int64_t makeRequest(const RubyRequest & request);
+ // void issueRequest(uint64_t paddr, uint8* data, int len, bool rw);
+ bool busy() { return m_is_busy;}
+
+ /* SLICC callback */
+ void dataCallback(const DataBlock & dblk);
+ void ackCallback();
+
+ void printConfig(std::ostream & out);
+
+private:
+ void issueNext();
+
+private:
+ int m_version;
+ AbstractController* m_controller;
+ bool m_is_busy;
+ DMARequest active_request;
+ int num_active_requests;
+ MessageBuffer* m_mandatory_q_ptr;
+};
+
+#endif // DMACONTROLLER_H
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
index 40b990e2d..294d57de2 100644
--- a/src/mem/ruby/system/DirectoryMemory.cc
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -37,118 +37,150 @@
*/
#include "mem/ruby/system/System.hh"
-#include "mem/ruby/common/Driver.hh"
#include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/protocol/Chip.hh"
+#include "mem/ruby/slicc_interface/AbstractController.hh"
+#include "mem/gems_common/util.hh"
-DirectoryMemory::DirectoryMemory(Chip* chip_ptr, int version)
+int DirectoryMemory::m_num_directories = 0;
+int DirectoryMemory::m_num_directories_bits = 0;
+int DirectoryMemory::m_total_size_bytes = 0;
+
+DirectoryMemory::DirectoryMemory(const string & name)
+ : m_name(name)
{
- m_chip_ptr = chip_ptr;
- m_version = version;
- // THIS DOESN'T SEEM TO WORK -- MRM
- // m_size = RubyConfig::memoryModuleBlocks()/RubyConfig::numberOfDirectory();
- m_size = RubyConfig::memoryModuleBlocks();
- assert(m_size > 0);
- /*********************************************************************
- // allocates an array of directory entry pointers & sets them to NULL
- m_entries = new Directory_Entry*[m_size];
- if (m_entries == NULL) {
- ERROR_MSG("Directory Memory: unable to allocate memory.");
+}
+
+void DirectoryMemory::init(const vector<string> & argv)
+{
+ m_controller = NULL;
+ for (vector<string>::const_iterator it = argv.begin(); it != argv.end(); it++) {
+ if ( (*it) == "version" )
+ m_version = atoi( (*(++it)).c_str() );
+ else if ( (*it) == "size_mb" ) {
+ m_size_bytes = atoi((*(++it)).c_str()) * (1<<20);
+ m_size_bits = log_int(m_size_bytes);
+ } else if ( (*it) == "controller" ) {
+ m_controller = RubySystem::getController((*(++it)));
+ } else
+ assert(0);
}
+ assert(m_controller != NULL);
- for (int i=0; i < m_size; i++) {
+ m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes();
+ m_entries = new Directory_Entry*[m_num_entries];
+ for (int i=0; i < m_num_entries; i++)
m_entries[i] = NULL;
- }
- */////////////////////////////////////////////////////////////////////
+
+ m_ram = g_system_ptr->getMemoryVector();
+
+ m_num_directories++;
+ m_num_directories_bits = log_int(m_num_directories);
+ m_total_size_bytes += m_size_bytes;
}
DirectoryMemory::~DirectoryMemory()
{
- /*********************************************************************
// free up all the directory entries
- for (int i=0; i < m_size; i++) {
- if (m_entries[i] != NULL) {
- delete m_entries[i];
- m_entries[i] = NULL;
- }
- }
+ for (int i=0;i<m_num_entries;i++)
+ if (m_entries[i] != NULL)
+      delete m_entries[i];
+ if (m_entries != NULL)
+ delete [] m_entries;
+}
- // free up the array of directory entries
- delete[] m_entries;
- *//////////////////////////////////////////////////////////////////////
- m_entries.clear();
+void DirectoryMemory::printConfig(ostream& out) const
+{
+ out << "DirectoryMemory module config: " << m_name << endl;
+ out << " controller: " << m_controller->getName() << endl;
+ out << " version: " << m_version << endl;
+ out << " memory_bits: " << m_size_bits << endl;
+ out << " memory_size_bytes: " << m_size_bytes << endl;
+ out << " memory_size_Kbytes: " << double(m_size_bytes) / (1<<10) << endl;
+ out << " memory_size_Mbytes: " << double(m_size_bytes) / (1<<20) << endl;
+ out << " memory_size_Gbytes: " << double(m_size_bytes) / (1<<30) << endl;
}
// Static method
-void DirectoryMemory::printConfig(ostream& out)
+void DirectoryMemory::printGlobalConfig(ostream & out)
+{
+ out << "DirectoryMemory Global Config: " << endl;
+ out << " number of directory memories: " << m_num_directories << endl;
+ if (m_num_directories > 1) {
+ out << " number of selection bits: " << m_num_directories_bits << endl;
+ out << " selection bits: " << RubySystem::getBlockSizeBits()+m_num_directories_bits-1
+ << "-" << RubySystem::getBlockSizeBits() << endl;
+ }
+ out << " total memory size bytes: " << m_total_size_bytes << endl;
+ out << " total memory size bits: " << log_int(m_total_size_bytes) << endl;
+
+}
+
+int DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address)
{
- out << "Memory config:" << endl;
- out << " memory_bits: " << RubyConfig::memorySizeBits() << endl;
- out << " memory_size_bytes: " << RubyConfig::memorySizeBytes() << endl;
- out << " memory_size_Kbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<10) << endl;
- out << " memory_size_Mbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<20) << endl;
- out << " memory_size_Gbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<30) << endl;
-
- out << " module_bits: " << RubyConfig::memoryModuleBits() << endl;
- out << " module_size_lines: " << RubyConfig::memoryModuleBlocks() << endl;
- out << " module_size_bytes: " << RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes() << endl;
- out << " module_size_Kbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
- out << " module_size_Mbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
+ if (m_num_directories_bits == 0) return 0;
+ int ret = address.bitSelect(RubySystem::getBlockSizeBits(),
+ RubySystem::getBlockSizeBits()+m_num_directories_bits-1);
+ return ret;
}
// Public method
bool DirectoryMemory::isPresent(PhysAddress address)
{
- return (map_Address_to_DirectoryNode(address) == m_chip_ptr->getID()*RubyConfig::numberOfDirectoryPerChip()+m_version);
+ bool ret = (mapAddressToDirectoryVersion(address) == m_version);
+ return ret;
}
-void DirectoryMemory::readPhysMem(uint64 address, int size, void * data)
+int DirectoryMemory::mapAddressToLocalIdx(PhysAddress address)
{
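+  // Directories are interleaved on cache-line addresses: drop the block-offset
+  // and directory-select bits to get the index local to this module.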
+ int ret = address.getAddress() >> (RubySystem::getBlockSizeBits() + m_num_directories_bits);
+ return ret;
}
Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
{
assert(isPresent(address));
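+  // Entries are allocated lazily: the first lookup of a block creates the
+  // directory entry and points its DataBlock at the backing MemoryVector.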
+ Directory_Entry* entry;
+ int idx = mapAddressToLocalIdx(address);
+ entry = m_entries[idx];
+ if (entry == NULL) {
+ entry = new Directory_Entry;
+ entry->getDataBlk().assign(m_ram->getBlockPtr(address));
+ m_entries[idx] = entry;
+ }
+ return (*entry);
+}
+/*
+Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
+{
+ assert(isPresent(address));
Index index = address.memoryModuleIndex();
if (index < 0 || index > m_size) {
- WARN_EXPR(m_chip_ptr->getID());
WARN_EXPR(address.getAddress());
WARN_EXPR(index);
WARN_EXPR(m_size);
ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
}
-
- map<Index, Directory_Entry*>::iterator iter = m_entries.find(index);
- Directory_Entry* entry = m_entries.find(index)->second;
+ Directory_Entry* entry = m_entries[index];
// allocate the directory entry on demand.
- if (iter == m_entries.end()) {
+ if (entry == NULL) {
entry = new Directory_Entry;
+ entry->getDataBlk().assign(m_ram->getBlockPtr(address));
- // entry->getProcOwner() = m_chip_ptr->getID(); // FIXME - This should not be hard coded
- // entry->getDirOwner() = true; // FIXME - This should not be hard-coded
-
- // load the data from physicalMemory when first initalizing
- physical_address_t physAddr = address.getAddress();
- int8 * dataArray = (int8 * )malloc(RubyConfig::dataBlockBytes() * sizeof(int8));
- readPhysMem(physAddr, RubyConfig::dataBlockBytes(), dataArray);
-
- for(int j=0; j < RubyConfig::dataBlockBytes(); j++) {
- entry->getDataBlk().setByte(j, dataArray[j]);
- }
- DEBUG_EXPR(NODE_COMP, MedPrio,entry->getDataBlk());
// store entry to the table
- m_entries.insert(make_pair(index, entry));
+ m_entries[index] = entry;
}
+
return (*entry);
}
+*/
-/*
void DirectoryMemory::invalidateBlock(PhysAddress address)
{
+ /*
assert(isPresent(address));
Index index = address.memoryModuleIndex();
@@ -161,16 +193,11 @@ void DirectoryMemory::invalidateBlock(PhysAddress address)
delete m_entries[index];
m_entries[index] = NULL;
}
-
+ */
}
-*/
void DirectoryMemory::print(ostream& out) const
{
- out << "Directory dump: " << endl;
- for(map<Index, Directory_Entry*>::const_iterator it = m_entries.begin(); it != m_entries.end(); ++it) {
- out << it->first << ": ";
- out << *(it->second) << endl;
- }
+
}
diff --git a/src/mem/ruby/system/DirectoryMemory.hh b/src/mem/ruby/system/DirectoryMemory.hh
index 6451ed459..6445ecc62 100644
--- a/src/mem/ruby/system/DirectoryMemory.hh
+++ b/src/mem/ruby/system/DirectoryMemory.hh
@@ -41,26 +41,34 @@
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/system/MemoryVector.hh"
#include "mem/protocol/Directory_Entry.hh"
-#include <map>
-class Chip;
+class AbstractController;
class DirectoryMemory {
public:
// Constructors
- DirectoryMemory(Chip* chip_ptr, int version);
+ DirectoryMemory(const string & name);
+ void init(const vector<string> & argv);
+ // DirectoryMemory(int version);
// Destructor
~DirectoryMemory();
+ int mapAddressToLocalIdx(PhysAddress address);
+ static int mapAddressToDirectoryVersion(PhysAddress address);
+
+ int getSize() { return m_size_bytes; }
+
// Public Methods
- static void printConfig(ostream& out);
+ void printConfig(ostream& out) const;
+ static void printGlobalConfig(ostream & out);
bool isPresent(PhysAddress address);
- // dummy function
- void readPhysMem(uint64 address, int size, void * data);
Directory_Entry& lookup(PhysAddress address);
+ void invalidateBlock(PhysAddress address);
+
void print(ostream& out) const;
private:
@@ -70,11 +78,22 @@ private:
DirectoryMemory(const DirectoryMemory& obj);
DirectoryMemory& operator=(const DirectoryMemory& obj);
+private:
+ const string m_name;
+ AbstractController* m_controller;
// Data Members (m_ prefix)
- map<Index, Directory_Entry*> m_entries;
- Chip* m_chip_ptr;
- int m_size; // # of memory module blocks for this directory
+ Directory_Entry **m_entries;
+ // int m_size; // # of memory module blocks this directory is responsible for
+ uint32 m_size_bytes;
+ uint32 m_size_bits;
+ int m_num_entries;
int m_version;
+
+ static int m_num_directories;
+ static int m_num_directories_bits;
+ static int m_total_size_bytes;
+
+ MemoryVector* m_ram;
};
// Output operator declaration
diff --git a/src/mem/ruby/system/MemoryControl.cc b/src/mem/ruby/system/MemoryControl.cc
index 86c6526c8..f9159ed3e 100644
--- a/src/mem/ruby/system/MemoryControl.cc
+++ b/src/mem/ruby/system/MemoryControl.cc
@@ -110,21 +110,21 @@
*
*/
-#include <list>
-
-#include "base/cprintf.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/network/Network.hh"
+
#include "mem/ruby/common/Consumer.hh"
+
#include "mem/ruby/system/MemoryControl.hh"
+#include <list>
+
class Consumer;
// Value to reset watchdog timer to.
@@ -151,32 +151,66 @@ ostream& operator<<(ostream& out, const MemoryControl& obj)
// ****************************************************************
// CONSTRUCTOR
+MemoryControl::MemoryControl(const string & name)
+ : m_name(name)
+{
+// printf ("MemoryControl name is %s \n", m_name.c_str());
+}
+
+void MemoryControl::init(const vector<string> & argv)
+{
+
+ for (vector<string>::const_iterator it = argv.begin(); it != argv.end(); it++) {
+ if ( (*it) == "version" )
+ m_version = atoi( (*(++it)).c_str() );
+ else if ( (*it) == "mem_bus_cycle_multiplier" ) {
+ m_mem_bus_cycle_multiplier = atoi((*(++it)).c_str());
+ } else if ( (*it) == "banks_per_rank" ) {
+ m_banks_per_rank = atoi((*(++it)).c_str());
+ } else if ( (*it) == "ranks_per_dimm" ) {
+ m_ranks_per_dimm = atoi((*(++it)).c_str());
+ } else if ( (*it) == "dimms_per_channel" ) {
+ m_dimms_per_channel = atoi((*(++it)).c_str());
+ } else if ( (*it) == "bank_bit_0" ) {
+ m_bank_bit_0 = atoi((*(++it)).c_str());
+ } else if ( (*it) == "rank_bit_0" ) {
+ m_rank_bit_0 = atoi((*(++it)).c_str());
+ } else if ( (*it) == "dimm_bit_0" ) {
+ m_dimm_bit_0 = atoi((*(++it)).c_str());
+ } else if ( (*it) == "bank_queue_size" ) {
+ m_bank_queue_size = atoi((*(++it)).c_str());
+ } else if ( (*it) == "bank_busy_time" ) {
+ m_bank_busy_time = atoi((*(++it)).c_str());
+ } else if ( (*it) == "rank_rank_delay" ) {
+ m_rank_rank_delay = atoi((*(++it)).c_str());
+ } else if ( (*it) == "read_write_delay" ) {
+ m_read_write_delay = atoi((*(++it)).c_str());
+ } else if ( (*it) == "basic_bus_busy_time" ) {
+ m_basic_bus_busy_time = atoi((*(++it)).c_str());
+ } else if ( (*it) == "mem_ctl_latency" ) {
+ m_mem_ctl_latency = atoi((*(++it)).c_str());
+ } else if ( (*it) == "refresh_period" ) {
+ m_refresh_period = atoi((*(++it)).c_str());
+ } else if ( (*it) == "tFaw" ) {
+ m_tFaw = atoi((*(++it)).c_str());
+ } else if ( (*it) == "mem_random_arbitrate" ) {
+ m_mem_random_arbitrate = atoi((*(++it)).c_str());
+ } else if ( (*it) == "mem_fixed_delay" ) {
+ m_mem_fixed_delay = atoi((*(++it)).c_str());
+ }
+// } else
+// assert(0);
+ }
+
-MemoryControl::MemoryControl (AbstractChip* chip_ptr, int version) {
- m_chip_ptr = chip_ptr;
- m_version = version;
+///////
+ //m_version = version;
m_msg_counter = 0;
m_debug = 0;
//if (m_version == 0) m_debug = 1;
- m_mem_bus_cycle_multiplier = RubyConfig::memBusCycleMultiplier();
- m_banks_per_rank = RubyConfig::banksPerRank();
- m_ranks_per_dimm = RubyConfig::ranksPerDimm();
- m_dimms_per_channel = RubyConfig::dimmsPerChannel();
- m_bank_bit_0 = RubyConfig::bankBit0();
- m_rank_bit_0 = RubyConfig::rankBit0();
- m_dimm_bit_0 = RubyConfig::dimmBit0();
- m_bank_queue_size = RubyConfig::bankQueueSize();
- m_bank_busy_time = RubyConfig::bankBusyTime();
- m_rank_rank_delay = RubyConfig::rankRankDelay();
- m_read_write_delay = RubyConfig::readWriteDelay();
- m_basic_bus_busy_time = RubyConfig::basicBusBusyTime();
- m_mem_ctl_latency = RubyConfig::memCtlLatency();
- m_refresh_period = RubyConfig::refreshPeriod();
- m_memRandomArbitrate = RubyConfig::memRandomArbitrate();
- m_tFaw = RubyConfig::tFaw();
- m_memFixedDelay = RubyConfig::memFixedDelay();
assert(m_tFaw <= 62); // must fit in a uint64 shift register
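The init() added above walks a flat vector of alternating parameter names and values, so configuration is now passed in as strings rather than read from RubyConfig. A usage sketch, assuming the MemoryControl declaration is visible; the numeric values are illustrative, not the shipped defaults:

    #include <string>
    #include <vector>

    std::vector<std::string> args;
    args.push_back("version");                  args.push_back("0");
    args.push_back("mem_bus_cycle_multiplier"); args.push_back("10");
    args.push_back("bank_busy_time");           args.push_back("11");
    args.push_back("mem_ctl_latency");          args.push_back("12");
    args.push_back("tFaw");                     args.push_back("32");

    MemoryControl mc("MemoryControl0");
    mc.init(args);   // each name is consumed together with the value that follows it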
@@ -257,13 +291,15 @@ void MemoryControl::enqueueMemRef (MemoryNode& memRef) {
Time arrival_time = memRef.m_time;
uint64 at = arrival_time;
bool is_mem_read = memRef.m_is_mem_read;
+ bool dirtyWB = memRef.m_is_dirty_wb;
physical_address_t addr = memRef.m_addr;
int bank = getBank(addr);
if (m_debug) {
- cprintf("New memory request%7d: %#08x %c arrived at %10d bank =%3x\n",
- m_msg_counter, addr, is_mem_read? 'R':'W', at, bank);
+ printf("New memory request%7d: 0x%08llx %c arrived at %10lld ", m_msg_counter, addr, is_mem_read? 'R':'W', at);
+ printf("bank =%3x\n", bank);
}
- g_system_ptr->getProfiler()->profileMemReq(bank);
+// printf ("m_name is %s \n", m_name.c_str());
+ g_system_ptr->getProfiler()->profileMemReq(m_name, bank);
m_input_queue.push_back(memRef);
if (!m_awakened) {
g_eventQueue_ptr->scheduleEvent(this, 1);
@@ -295,7 +331,7 @@ MemoryNode MemoryControl::peekNode () {
MemoryNode req = m_response_queue.front();
uint64 returnTime = req.m_time;
if (m_debug) {
- cprintf("Old memory request%7d: %#08x %c peeked at %10d\n",
+ printf("Old memory request%7d: 0x%08llx %c peeked at %10lld\n",
req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', returnTime);
}
return req;
@@ -319,10 +355,10 @@ void MemoryControl::printConfig (ostream& out) {
out << "Memory Control " << m_version << ":" << endl;
out << " Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier << endl;
out << " Basic read latency: " << m_mem_ctl_latency << endl;
- if (m_memFixedDelay) {
- out << " Fixed Latency mode: Added cycles = " << m_memFixedDelay << endl;
+ if (m_mem_fixed_delay) {
+ out << " Fixed Latency mode: Added cycles = " << m_mem_fixed_delay << endl;
} else {
- out << " Bank busy time: " << BANK_BUSY_TIME << " memory cycles" << endl;
+ out << " Bank busy time: " << m_bank_busy_time << " memory cycles" << endl;
out << " Memory channel busy time: " << m_basic_bus_busy_time << endl;
out << " Dead cycles between reads to different ranks: " << m_rank_rank_delay << endl;
out << " Dead cycle between a read and a write: " << m_read_write_delay << endl;
@@ -336,7 +372,7 @@ void MemoryControl::printConfig (ostream& out) {
out << " LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
out << " Max size of each bank queue: " << m_bank_queue_size << endl;
out << " Refresh period (within one bank): " << m_refresh_period << endl;
- out << " Arbitration randomness: " << m_memRandomArbitrate << endl;
+ out << " Arbitration randomness: " << m_mem_random_arbitrate << endl;
}
@@ -389,20 +425,20 @@ int MemoryControl::getRank (int bank) {
// can be issued this cycle
bool MemoryControl::queueReady (int bank) {
- if ((m_bankBusyCounter[bank] > 0) && !m_memFixedDelay) {
- g_system_ptr->getProfiler()->profileMemBankBusy();
- //if (m_debug) cprintf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
+ if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) {
+ g_system_ptr->getProfiler()->profileMemBankBusy(m_name);
+ //if (m_debug) printf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
return false;
}
- if (m_memRandomArbitrate >= 2) {
- if ((random() % 100) < m_memRandomArbitrate) {
- g_system_ptr->getProfiler()->profileMemRandBusy();
+ if (m_mem_random_arbitrate >= 2) {
+ if ((random() % 100) < m_mem_random_arbitrate) {
+ g_system_ptr->getProfiler()->profileMemRandBusy(m_name);
return false;
}
}
- if (m_memFixedDelay) return true;
+ if (m_mem_fixed_delay) return true;
if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
- g_system_ptr->getProfiler()->profileMemNotOld();
+ g_system_ptr->getProfiler()->profileMemNotOld(m_name);
return false;
}
if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
@@ -411,26 +447,26 @@ bool MemoryControl::queueReady (int bank) {
// a bus wait. This is a little inaccurate since it MIGHT
// have also been blocked waiting for a read-write or a
// read-read instead, but it's pretty close.
- g_system_ptr->getProfiler()->profileMemArbWait(1);
+ g_system_ptr->getProfiler()->profileMemArbWait(m_name, 1);
return false;
}
if (m_busBusyCounter_Basic > 0) {
- g_system_ptr->getProfiler()->profileMemBusBusy();
+ g_system_ptr->getProfiler()->profileMemBusBusy(m_name);
return false;
}
int rank = getRank(bank);
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
- g_system_ptr->getProfiler()->profileMemTfawBusy();
+ g_system_ptr->getProfiler()->profileMemTfawBusy(m_name);
return false;
}
bool write = !m_bankQueues[bank].front().m_is_mem_read;
if (write && (m_busBusyCounter_Write > 0)) {
- g_system_ptr->getProfiler()->profileMemReadWriteBusy();
+ g_system_ptr->getProfiler()->profileMemReadWriteBusy(m_name);
return false;
}
if (!write && (rank != m_busBusy_WhichRank)
&& (m_busBusyCounter_ReadNewRank > 0)) {
- g_system_ptr->getProfiler()->profileMemDataBusBusy();
+ g_system_ptr->getProfiler()->profileMemDataBusBusy(m_name);
return false;
}
return true;
@@ -453,9 +489,9 @@ bool MemoryControl::issueRefresh (int bank) {
//if (m_debug) {
//uint64 current_time = g_eventQueue_ptr->getTime();
- //cprintf(" Refresh bank %3x at %d\n", bank, current_time);
+ //printf(" Refresh bank %3x at %lld\n", bank, current_time);
//}
- g_system_ptr->getProfiler()->profileMemRefresh();
+ g_system_ptr->getProfiler()->profileMemRefresh(m_name);
m_need_refresh--;
m_refresh_bank++;
if (m_refresh_bank >= m_total_banks) m_refresh_bank = 0;
@@ -488,23 +524,23 @@ void MemoryControl::issueRequest (int bank) {
m_bankQueues[bank].pop_front();
if (m_debug) {
uint64 current_time = g_eventQueue_ptr->getTime();
- cprintf(" Mem issue request%7d: %#08x %c at %10d bank =%3x\n",
+ printf(" Mem issue request%7d: 0x%08llx %c at %10lld bank =%3x\n",
req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', current_time, bank);
}
if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
- enqueueToDirectory(req, m_mem_ctl_latency + m_memFixedDelay);
+ enqueueToDirectory(req, m_mem_ctl_latency + m_mem_fixed_delay);
}
m_oldRequest[bank] = 0;
markTfaw(rank);
m_bankBusyCounter[bank] = m_bank_busy_time;
m_busBusy_WhichRank = rank;
if (req.m_is_mem_read) {
- g_system_ptr->getProfiler()->profileMemRead();
+ g_system_ptr->getProfiler()->profileMemRead(m_name);
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time + m_rank_rank_delay;
} else {
- g_system_ptr->getProfiler()->profileMemWrite();
+ g_system_ptr->getProfiler()->profileMemWrite(m_name);
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time;
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
@@ -531,8 +567,8 @@ void MemoryControl::executeCycle () {
}
// After time period expires, latch an indication that we need a refresh.
- // Disable refresh if in memFixedDelay mode.
- if (!m_memFixedDelay) m_refresh_count--;
+ // Disable refresh if in mem_fixed_delay mode.
+ if (!m_mem_fixed_delay) m_refresh_count--;
if (m_refresh_count == 0) {
m_refresh_count = m_refresh_period_system;
assert (m_need_refresh < 10); // Are we overrunning our ability to refresh?
@@ -553,7 +589,7 @@ void MemoryControl::executeCycle () {
}
// If randomness desired, re-randomize round-robin position each cycle
- if (m_memRandomArbitrate) {
+ if (m_mem_random_arbitrate) {
m_roundRobin = random() % m_total_banks;
}
@@ -562,7 +598,7 @@ void MemoryControl::executeCycle () {
// request and issue it. Treat a refresh request as if it
// were at the head of its bank queue. After we issue something,
// keep scanning the queues just to gather statistics about
- // how many are waiting. If in memFixedDelay mode, we can issue
+ // how many are waiting. If in mem_fixed_delay mode, we can issue
// more than one request per cycle.
int queueHeads = 0;
@@ -573,7 +609,7 @@ void MemoryControl::executeCycle () {
issueRefresh(m_roundRobin);
int qs = m_bankQueues[m_roundRobin].size();
if (qs > 1) {
- g_system_ptr->getProfiler()->profileMemBankQ(qs-1);
+ g_system_ptr->getProfiler()->profileMemBankQ(m_name, qs-1);
}
if (qs > 0) {
m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is queued
@@ -581,15 +617,15 @@ void MemoryControl::executeCycle () {
if (queueReady(m_roundRobin)) {
issueRequest(m_roundRobin);
banksIssued++;
- if (m_memFixedDelay) {
- g_system_ptr->getProfiler()->profileMemWaitCycles(m_memFixedDelay);
+ if (m_mem_fixed_delay) {
+ g_system_ptr->getProfiler()->profileMemWaitCycles(m_name, m_mem_fixed_delay);
}
}
}
}
// memWaitCycles is a redundant catch-all for the specific counters in queueReady
- g_system_ptr->getProfiler()->profileMemWaitCycles(queueHeads - banksIssued);
+ g_system_ptr->getProfiler()->profileMemWaitCycles(m_name, queueHeads - banksIssued);
// Check input queue and move anything to bank queues if not full.
// Since this is done here at the end of the cycle, there will always
@@ -606,7 +642,7 @@ void MemoryControl::executeCycle () {
m_input_queue.pop_front();
m_bankQueues[bank].push_back(req);
}
- g_system_ptr->getProfiler()->profileMemInputQ(m_input_queue.size());
+ g_system_ptr->getProfiler()->profileMemInputQ(m_name, m_input_queue.size());
}
}
diff --git a/src/mem/ruby/system/MemoryControl.hh b/src/mem/ruby/system/MemoryControl.hh
index aef2edc5b..3419f8c40 100644
--- a/src/mem/ruby/system/MemoryControl.hh
+++ b/src/mem/ruby/system/MemoryControl.hh
@@ -43,7 +43,6 @@
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/Message.hh"
#include "mem/gems_common/util.hh"
@@ -67,7 +66,8 @@ class MemoryControl : public Consumer, public AbstractMemOrCache {
public:
// Constructors
- MemoryControl (AbstractChip* chip_ptr, int version);
+ MemoryControl(const string & name);
+ void init(const vector<string> & argv);
// Destructor
~MemoryControl ();
@@ -78,8 +78,8 @@ public:
void setConsumer (Consumer* consumer_ptr);
Consumer* getConsumer () { return m_consumer_ptr; };
- void setDescription (const string& name) { m_name = name; };
- string getDescription () { return m_name; };
+ void setDescription (const string& name) { m_description = name; };
+ string getDescription () { return m_description; };
// Called from the directory:
void enqueue (const MsgPtr& message, int latency );
@@ -97,6 +97,12 @@ public:
void print (ostream& out) const;
void setDebug (int debugFlag);
+
+ //added by SS
+ int getBanksPerRank() { return m_banks_per_rank; };
+ int getRanksPerDimm() { return m_ranks_per_dimm; };
+ int getDimmsPerChannel() { return m_dimms_per_channel; }
+
private:
void enqueueToDirectory (MemoryNode req, int latency);
@@ -113,9 +119,9 @@ private:
MemoryControl& operator=(const MemoryControl& obj);
// data members
- AbstractChip* m_chip_ptr;
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
string m_name;
+ string m_description;
int m_version;
int m_msg_counter;
int m_awakened;
@@ -134,9 +140,9 @@ private:
int m_basic_bus_busy_time;
int m_mem_ctl_latency;
int m_refresh_period;
- int m_memRandomArbitrate;
+ int m_mem_random_arbitrate;
int m_tFaw;
- int m_memFixedDelay;
+ int m_mem_fixed_delay;
int m_total_banks;
int m_total_ranks;
diff --git a/src/mem/ruby/system/MemoryVector.hh b/src/mem/ruby/system/MemoryVector.hh
new file mode 100644
index 000000000..c5f3cea7f
--- /dev/null
+++ b/src/mem/ruby/system/MemoryVector.hh
@@ -0,0 +1,81 @@
+
+#ifndef MEMORYVECTOR_H
+#define MEMORYVECTOR_H
+
+#include "mem/ruby/common/Address.hh"
+
+class DirectoryMemory;
+
+/**
+ * MemoryVector holds memory data (DRAM only)
+ */
+class MemoryVector {
+ public:
+ MemoryVector();
+ MemoryVector(uint32 size);
+ ~MemoryVector();
+ friend class DirectoryMemory;
+
+ void setSize(uint32 size); // destructive
+
+ void write(const Address & paddr, uint8* data, int len);
+ uint8* read(const Address & paddr, uint8* data, int len);
+
+ private:
+ uint8* getBlockPtr(const Address & paddr);
+
+ uint32 m_size;
+ uint8* m_vec;
+};
+
+inline
+MemoryVector::MemoryVector()
+{
+ m_size = 0;
+ m_vec = NULL;
+}
+
+inline
+MemoryVector::MemoryVector(uint32 size)
+{
+ m_size = size;
+ m_vec = new uint8[size];
+}
+
+inline
+MemoryVector::~MemoryVector()
+{
+ delete [] m_vec;
+}
+
+inline
+void MemoryVector::setSize(uint32 size)
+{
+ m_size = size;
+ if (m_vec != NULL)
+ delete [] m_vec;
+ m_vec = new uint8[size];
+}
+
+inline
+void MemoryVector::write(const Address & paddr, uint8* data, int len)
+{
+ assert(paddr.getAddress() + len <= m_size);
+ memcpy(m_vec + paddr.getAddress(), data, len);
+}
+
+inline
+uint8* MemoryVector::read(const Address & paddr, uint8* data, int len)
+{
+ assert(paddr.getAddress() + len <= m_size);
+ memcpy(data, m_vec + paddr.getAddress(), len);
+ return data;
+}
+
+inline
+uint8* MemoryVector::getBlockPtr(const Address & paddr)
+{
+ return m_vec + paddr.getAddress();
+}
+
+#endif // MEMORYVECTOR_H
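A short usage sketch for the new MemoryVector backing store; the addresses and sizes are illustrative, and it assumes the Address(physical_address_t) constructor used elsewhere in Ruby:

    #include <cassert>
    #include <cstring>
    #include "mem/ruby/system/MemoryVector.hh"

    MemoryVector ram;
    ram.setSize(1 << 20);                       // 1 MB of raw DRAM data

    uint8 block[64];
    memset(block, 0xAB, sizeof(block));
    ram.write(Address(0x1000), block, 64);      // copy a block into the vector

    uint8 readback[64];
    ram.read(Address(0x1000), readback, 64);    // copy the same block back out
    assert(memcmp(block, readback, 64) == 0);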
diff --git a/src/mem/ruby/system/NodePersistentTable.cc b/src/mem/ruby/system/NodePersistentTable.cc
deleted file mode 100644
index 4dd5c670f..000000000
--- a/src/mem/ruby/system/NodePersistentTable.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: NodePersistentTable.C 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
- *
- */
-
-#include "mem/ruby/system/NodePersistentTable.hh"
-#include "mem/ruby/common/Set.hh"
-#include "mem/gems_common/Map.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
-#include "mem/gems_common/util.hh"
-
-// randomize so that handoffs are not locality-aware
-// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
-int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-
-
-class NodePersistentTableEntry {
-public:
- Set m_starving;
- Set m_marked;
- Set m_request_to_write;
-};
-
-NodePersistentTable::NodePersistentTable(AbstractChip* chip_ptr, int version)
-{
- m_chip_ptr = chip_ptr;
- m_map_ptr = new Map<Address, NodePersistentTableEntry>;
- m_version = version;
-}
-
-NodePersistentTable::~NodePersistentTable()
-{
- delete m_map_ptr;
- m_map_ptr = NULL;
- m_chip_ptr = NULL;
-}
-
-void NodePersistentTable::persistentRequestLock(const Address& address, NodeID llocker, AccessType type)
-{
-
- // if (locker == m_chip_ptr->getID() )
- // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
-
- NodeID locker = (NodeID) persistent_randomize[llocker];
-
- assert(address == line_address(address));
- if (!m_map_ptr->exist(address)) {
- // Allocate if not present
- NodePersistentTableEntry entry;
- entry.m_starving.add(locker);
- if (type == AccessType_Write) {
- entry.m_request_to_write.add(locker);
- }
- m_map_ptr->add(address, entry);
- } else {
- NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
-
- entry.m_starving.add(locker);
- if (type == AccessType_Write) {
- entry.m_request_to_write.add(locker);
- }
- assert(entry.m_marked.isSubset(entry.m_starving));
- }
-}
-
-void NodePersistentTable::persistentRequestUnlock(const Address& address, NodeID uunlocker)
-{
- // if (unlocker == m_chip_ptr->getID() )
- // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
-
- NodeID unlocker = (NodeID) persistent_randomize[uunlocker];
-
- assert(address == line_address(address));
- assert(m_map_ptr->exist(address));
- NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
- assert(entry.m_marked.isSubset(entry.m_starving));
- entry.m_starving.remove(unlocker);
- entry.m_marked.remove(unlocker);
- entry.m_request_to_write.remove(unlocker);
- assert(entry.m_marked.isSubset(entry.m_starving));
-
- // Deallocate if empty
- if (entry.m_starving.isEmpty()) {
- assert(entry.m_marked.isEmpty());
- m_map_ptr->erase(address);
- }
-}
-
-bool NodePersistentTable::okToIssueStarving(const Address& address) const
-{
- assert(address == line_address(address));
- if (!m_map_ptr->exist(address)) {
- return true; // No entry present
- } else if (m_map_ptr->lookup(address).m_starving.isElement(m_chip_ptr->getID())) {
- return false; // We can't issue another lockdown until are previous unlock has occurred
- } else {
- return (m_map_ptr->lookup(address).m_marked.isEmpty());
- }
-}
-
-NodeID NodePersistentTable::findSmallest(const Address& address) const
-{
- assert(address == line_address(address));
- assert(m_map_ptr->exist(address));
- const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
- return (NodeID) persistent_randomize[entry.m_starving.smallestElement()];
-}
-
-AccessType NodePersistentTable::typeOfSmallest(const Address& address) const
-{
- assert(address == line_address(address));
- assert(m_map_ptr->exist(address));
- const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- if (entry.m_request_to_write.isElement(entry.m_starving.smallestElement())) {
- return AccessType_Write;
- } else {
- return AccessType_Read;
- }
-}
-
-void NodePersistentTable::markEntries(const Address& address)
-{
- assert(address == line_address(address));
- if (m_map_ptr->exist(address)) {
- NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- assert(entry.m_marked.isEmpty()); // None should be marked
- entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
- }
-}
-
-bool NodePersistentTable::isLocked(const Address& address) const
-{
- assert(address == line_address(address));
- // If an entry is present, it must be locked
- return (m_map_ptr->exist(address));
-}
-
-int NodePersistentTable::countStarvingForAddress(const Address& address) const
-{
- if (m_map_ptr->exist(address)) {
- NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- return (entry.m_starving.count());
- }
- else {
- return 0;
- }
-}
-
-int NodePersistentTable::countReadStarvingForAddress(const Address& address) const
-{
- if (m_map_ptr->exist(address)) {
- NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
- return (entry.m_starving.count() - entry.m_request_to_write.count());
- }
- else {
- return 0;
- }
-}
-
-
diff --git a/src/mem/ruby/system/NodePersistentTable.hh b/src/mem/ruby/system/NodePersistentTable.hh
deleted file mode 100644
index d731b25ae..000000000
--- a/src/mem/ruby/system/NodePersistentTable.hh
+++ /dev/null
@@ -1,99 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: NodePersistentTable.hh 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
- *
- * Description:
- *
- */
-
-#ifndef NodePersistentTable_H
-#define NodePersistentTable_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/protocol/AccessType.hh"
-
-class AbstractChip;
-
-template <class KEY_TYPE, class VALUE_TYPE> class Map;
-class Address;
-class NodePersistentTableEntry;
-
-class NodePersistentTable {
-public:
- // Constructors
- NodePersistentTable(AbstractChip* chip_ptr, int version);
-
- // Destructor
- ~NodePersistentTable();
-
- // Public Methods
- void persistentRequestLock(const Address& address, NodeID locker, AccessType type);
- void persistentRequestUnlock(const Address& address, NodeID unlocker);
- bool okToIssueStarving(const Address& address) const;
- NodeID findSmallest(const Address& address) const;
- AccessType typeOfSmallest(const Address& address) const;
- void markEntries(const Address& address);
- bool isLocked(const Address& addr) const;
- int countStarvingForAddress(const Address& addr) const;
- int countReadStarvingForAddress(const Address& addr) const;
-
- static void printConfig(ostream& out) {}
-
- void print(ostream& out) const;
-private:
- // Private Methods
-
- // Private copy constructor and assignment operator
- NodePersistentTable(const NodePersistentTable& obj);
- NodePersistentTable& operator=(const NodePersistentTable& obj);
-
- // Data Members (m_prefix)
- Map<Address, NodePersistentTableEntry>* m_map_ptr;
- AbstractChip* m_chip_ptr;
- int m_version;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const NodePersistentTable& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const NodePersistentTable& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //NodePersistentTable_H
diff --git a/src/mem/ruby/system/PersistentArbiter.cc b/src/mem/ruby/system/PersistentArbiter.cc
deleted file mode 100644
index b44393301..000000000
--- a/src/mem/ruby/system/PersistentArbiter.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "mem/ruby/system/PersistentArbiter.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
-#include "mem/gems_common/util.hh"
-
-PersistentArbiter::PersistentArbiter(AbstractChip* chip_ptr)
-{
- m_chip_ptr = chip_ptr;
-
- // wastes entries, but who cares
- m_entries.setSize(RubyConfig::numberOfProcessors());
-
- for (int i = 0; i < m_entries.size(); i++) {
- m_entries[i].valid = false;
- }
-
- m_busy = false;
- m_locker = -1;
-
-}
-
-PersistentArbiter::~PersistentArbiter()
-{
- m_chip_ptr = NULL;
-}
-
-
-void PersistentArbiter::addLocker(NodeID id, Address addr, AccessType type) {
- //cout << "Arbiter " << getArbiterId() << " adding locker " << id << " " << addr << endl;
- assert(m_entries[id].valid == false);
- m_entries[id].valid = true;
- m_entries[id].address = addr;
- m_entries[id].type = type;
- m_entries[id].localId = id;
-
-}
-
-void PersistentArbiter::removeLocker(NodeID id) {
- //cout << "Arbiter " << getArbiterId() << " removing locker " << id << " " << m_entries[id].address << endl;
- assert(m_entries[id].valid == true);
- m_entries[id].valid = false;
-
- if (!lockersExist()) {
- m_busy = false;
- }
-}
-
-bool PersistentArbiter::successorRequestPresent(Address addr, NodeID id) {
- for (int i = (id + 1); i < m_entries.size(); i++) {
- if (m_entries[i].address == addr && m_entries[i].valid) {
- //cout << "m_entries[" << id << ", address " << m_entries[id].address << " is equal to " << addr << endl;
- return true;
- }
- }
- return false;
-}
-
-bool PersistentArbiter::lockersExist() {
- for (int i = 0; i < m_entries.size(); i++) {
- if (m_entries[i].valid == true) {
- return true;
- }
- }
- //cout << "no lockers found" << endl;
- return false;
-}
-
-void PersistentArbiter::advanceActiveLock() {
- assert(lockersExist());
-
- //cout << "arbiter advancing lock from " << m_locker;
- m_busy = false;
-
- if (m_locker < (m_entries.size() - 1)) {
- for (int i = (m_locker+1); i < m_entries.size(); i++) {
- if (m_entries[i].valid == true) {
- m_locker = i;
- m_busy = true;
- //cout << " to " << m_locker << endl;
- return;
- }
- }
- }
-
- if (!m_busy) {
- for (int i = 0; i < m_entries.size(); i++) {
- if (m_entries[i].valid == true) {
- m_locker = i;
- m_busy = true;
- //cout << " to " << m_locker << endl;
- return;
- }
- }
-
- assert(m_busy)
- }
-}
-
-Address PersistentArbiter::getActiveLockAddress() {
- assert( m_entries[m_locker].valid = true );
- return m_entries[m_locker].address;
-}
-
-
-NodeID PersistentArbiter::getArbiterId() {
- return m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip();
-}
-
-bool PersistentArbiter::isBusy() {
- return m_busy;
-}
-
-NodeID PersistentArbiter::getActiveLocalId() {
- assert( m_entries[m_locker].valid = true );
- return m_entries[m_locker].localId;
-}
-
-void PersistentArbiter::setIssuedAddress(Address addr) {
- m_issued_address = addr;
-}
-
-bool PersistentArbiter::isIssuedAddress(Address addr) {
- return (m_issued_address == addr);
-}
-
-void PersistentArbiter::print(ostream& out) const {
-
- out << "[";
- for (int i = 0; i < m_entries.size(); i++) {
- if (m_entries[i].valid == true) {
- out << "( " << m_entries[i].localId << ", " << m_entries[i].address << ") ";
- }
- }
- out << "]" << endl;
-
-}
diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc
deleted file mode 100644
index 7f07251ce..000000000
--- a/src/mem/ruby/system/PersistentTable.cc
+++ /dev/null
@@ -1,194 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/system/PersistentTable.hh"
-#include "mem/ruby/common/NetDest.hh"
-#include "mem/gems_common/Map.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
-#include "mem/gems_common/util.hh"
-
-// randomize so that handoffs are not locality-aware
-// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
-// int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-
-
-class PersistentTableEntry {
-public:
- NetDest m_starving;
- NetDest m_marked;
- NetDest m_request_to_write;
-};
-
-PersistentTable::PersistentTable(AbstractChip* chip_ptr, int version)
-{
- m_chip_ptr = chip_ptr;
- m_map_ptr = new Map<Address, PersistentTableEntry>;
- m_version = version;
-}
-
-PersistentTable::~PersistentTable()
-{
- delete m_map_ptr;
- m_map_ptr = NULL;
- m_chip_ptr = NULL;
-}
-
-void PersistentTable::persistentRequestLock(const Address& address, MachineID locker, AccessType type)
-{
-
- // if (locker == m_chip_ptr->getID() )
- // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
-
- // MachineID locker = (MachineID) persistent_randomize[llocker];
-
- assert(address == line_address(address));
- if (!m_map_ptr->exist(address)) {
- // Allocate if not present
- PersistentTableEntry entry;
- entry.m_starving.add(locker);
- if (type == AccessType_Write) {
- entry.m_request_to_write.add(locker);
- }
- m_map_ptr->add(address, entry);
- } else {
- PersistentTableEntry& entry = m_map_ptr->lookup(address);
- assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
-
- entry.m_starving.add(locker);
- if (type == AccessType_Write) {
- entry.m_request_to_write.add(locker);
- }
- assert(entry.m_marked.isSubset(entry.m_starving));
- }
-}
-
-void PersistentTable::persistentRequestUnlock(const Address& address, MachineID unlocker)
-{
- // if (unlocker == m_chip_ptr->getID() )
- // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
-
- // MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
-
- assert(address == line_address(address));
- assert(m_map_ptr->exist(address));
- PersistentTableEntry& entry = m_map_ptr->lookup(address);
- assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
- assert(entry.m_marked.isSubset(entry.m_starving));
- entry.m_starving.remove(unlocker);
- entry.m_marked.remove(unlocker);
- entry.m_request_to_write.remove(unlocker);
- assert(entry.m_marked.isSubset(entry.m_starving));
-
- // Deallocate if empty
- if (entry.m_starving.isEmpty()) {
- assert(entry.m_marked.isEmpty());
- m_map_ptr->erase(address);
- }
-}
-
-bool PersistentTable::okToIssueStarving(const Address& address) const
-{
- assert(address == line_address(address));
- if (!m_map_ptr->exist(address)) {
- return true; // No entry present
- } else if (m_map_ptr->lookup(address).m_starving.isElement( (MachineID) {MachineType_L1Cache, m_version})) {
- return false; // We can't issue another lockdown until are previous unlock has occurred
- } else {
- return (m_map_ptr->lookup(address).m_marked.isEmpty());
- }
-}
-
-MachineID PersistentTable::findSmallest(const Address& address) const
-{
- assert(address == line_address(address));
- assert(m_map_ptr->exist(address));
- const PersistentTableEntry& entry = m_map_ptr->lookup(address);
- // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
- // return (MachineID) persistent_randomize[entry.m_starving.smallestElement()];
- return (MachineID) { MachineType_L1Cache, entry.m_starving.smallestElement() };
-}
-
-AccessType PersistentTable::typeOfSmallest(const Address& address) const
-{
- assert(address == line_address(address));
- assert(m_map_ptr->exist(address));
- const PersistentTableEntry& entry = m_map_ptr->lookup(address);
- if (entry.m_request_to_write.isElement((MachineID) {MachineType_L1Cache, entry.m_starving.smallestElement()})) {
- return AccessType_Write;
- } else {
- return AccessType_Read;
- }
-}
-
-void PersistentTable::markEntries(const Address& address)
-{
- assert(address == line_address(address));
- if (m_map_ptr->exist(address)) {
- PersistentTableEntry& entry = m_map_ptr->lookup(address);
- assert(entry.m_marked.isEmpty()); // None should be marked
- entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
- }
-}
-
-bool PersistentTable::isLocked(const Address& address) const
-{
- assert(address == line_address(address));
- // If an entry is present, it must be locked
- return (m_map_ptr->exist(address));
-}
-
-int PersistentTable::countStarvingForAddress(const Address& address) const
-{
- if (m_map_ptr->exist(address)) {
- PersistentTableEntry& entry = m_map_ptr->lookup(address);
- return (entry.m_starving.count());
- }
- else {
- return 0;
- }
-}
-
-int PersistentTable::countReadStarvingForAddress(const Address& address) const
-{
- if (m_map_ptr->exist(address)) {
- PersistentTableEntry& entry = m_map_ptr->lookup(address);
- return (entry.m_starving.count() - entry.m_request_to_write.count());
- }
- else {
- return 0;
- }
-}
-
-
diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh
deleted file mode 100644
index 9f2e38fd7..000000000
--- a/src/mem/ruby/system/PersistentTable.hh
+++ /dev/null
@@ -1,99 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef PersistentTable_H
-#define PersistentTable_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/MachineID.hh"
-#include "mem/protocol/AccessType.hh"
-
-class AbstractChip;
-
-template <class KEY_TYPE, class VALUE_TYPE> class Map;
-class Address;
-class PersistentTableEntry;
-
-class PersistentTable {
-public:
- // Constructors
- PersistentTable(AbstractChip* chip_ptr, int version);
-
- // Destructor
- ~PersistentTable();
-
- // Public Methods
- void persistentRequestLock(const Address& address, MachineID locker, AccessType type);
- void persistentRequestUnlock(const Address& address, MachineID unlocker);
- bool okToIssueStarving(const Address& address) const;
- MachineID findSmallest(const Address& address) const;
- AccessType typeOfSmallest(const Address& address) const;
- void markEntries(const Address& address);
- bool isLocked(const Address& addr) const;
- int countStarvingForAddress(const Address& addr) const;
- int countReadStarvingForAddress(const Address& addr) const;
-
- static void printConfig(ostream& out) {}
-
- void print(ostream& out) const;
-private:
- // Private Methods
-
- // Private copy constructor and assignment operator
- PersistentTable(const PersistentTable& obj);
- PersistentTable& operator=(const PersistentTable& obj);
-
- // Data Members (m_prefix)
- Map<Address, PersistentTableEntry>* m_map_ptr;
- AbstractChip* m_chip_ptr;
- int m_version;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const PersistentTable& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const PersistentTable& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //PersistentTable_H
diff --git a/src/mem/ruby/system/ProcessorInterface.hh b/src/mem/ruby/system/ProcessorInterface.hh
new file mode 100644
index 000000000..d76e29f65
--- /dev/null
+++ b/src/mem/ruby/system/ProcessorInterface.hh
@@ -0,0 +1,45 @@
+
+struct ProcessorRequest {
+ vector<CacheRequest*> cache_requests;
+};
+
+class ProcessorInterface {
+
+public:
+
+ void read_atomic(const Address & paddr, void* data, int len) {
+ assert(paddr.getLineAddress() + RubyConfig::dataBlockBytes() >= paddr + len);
+ // for now, atomics can't span two blocks. Maybe fix this later
+ }
+
+ void read(const Address & paddr, const Address & rip, AccessModeType atype, void* data, const int len) {
+
+ // create the CacheRequests
+ ProcessorRequest* this_request = new ProcessorRequest;
+ Address split_addr = paddr;
+ int len_remaining = len;
+ while (split_addr.getAddress() < paddr.getAddress() + len) {
+ int split_len = (split_addr.getAddress() + len_remaining <= split_addr.getLineAddress() + RubyConfig::dataBlockBytes()) ?
+ len_remaining :
+ RubyConfig::dataBlockBytes() - split_addr.getOffset();
+      CacheRequest* creq = new CacheRequest( line_address(split_addr),
+                                             split_addr,
+                                             CacheRequestType_LD,
+                                             rip,
+                                             atype,
+                                             split_len,
+                                             PrefetchBit_No,
+                                             laddr,
+                                             0); // SMT thread id
+ this_request->cache_requests.push_back(creq);
+ split_addr += split_len;
+ len_remaining -= split_len;
+ }
+ outstanding_requests.push_back(this_request);
+
+ }
+
+private:
+ vector<ProcessorRequest*> outstanding_requests;
+ Sequencer* m_sequencer;
+};
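The while loop in read() splits an access that crosses a cache-line boundary into one CacheRequest per line. A standalone sketch of just that arithmetic, with an assumed 64-byte line and illustrative addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kLineBytes = 64;      // assumed RubyConfig::dataBlockBytes()
      uint64_t paddr = 0x10038;            // 8 bytes before the end of a line
      int len = 16;                        // so the access spans two lines

      uint64_t addr = paddr;
      int remaining = len;
      while (addr < paddr + len) {
        uint64_t line = addr & ~(kLineBytes - 1);
        int room = (int)(line + kLineBytes - addr);      // bytes left in this line
        int piece = remaining < room ? remaining : room;
        printf("cache request: addr=0x%llx len=%d\n",
               (unsigned long long)addr, piece);
        addr += piece;
        remaining -= piece;
      }
      return 0;   // prints two requests of 8 bytes each
    }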
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
new file mode 100644
index 000000000..2a5c5f479
--- /dev/null
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -0,0 +1,5 @@
+
+#include "mem/ruby/system/RubyPort.hh"
+
+//void (*RubyPort::m_hit_callback)(int64_t) = NULL;
+uint16_t RubyPort::m_num_ports = 0;
diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh
new file mode 100644
index 000000000..2f391070f
--- /dev/null
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -0,0 +1,60 @@
+#ifndef RUBYPORT_H
+#define RUBYPORT_H
+
+#include "mem/ruby/libruby.hh"
+#include <string>
+#include <assert.h>
+
+using namespace std;
+
+class RubyPort {
+public:
+ RubyPort(const string & name)
+ : m_name(name)
+ {
+ m_port_id = m_num_ports++;
+ m_request_cnt = 0;
+ m_hit_callback = NULL;
+ assert(m_num_ports <= 2048); // see below for reason
+ }
+ virtual ~RubyPort() {}
+
+ virtual int64_t makeRequest(const RubyRequest & request) = 0;
+
+ void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
+ assert(m_hit_callback == NULL); // can't assign hit_callback twice
+ m_hit_callback = hit_callback;
+ }
+
+protected:
+ const string m_name;
+ void (*m_hit_callback)(int64_t);
+
+ int64_t makeUniqueRequestID() {
+ // The request ID is generated by combining the port ID with a request count
+ // so that request IDs can be formed concurrently by multiple threads.
+ // IDs are formed as follows:
+ //
+ //
+ // 0 PortID Request Count
+ // +----+---------------+-----------------------------------------------------+
+ // | 63 | 62-48 | 47-0 |
+ // +----+---------------+-----------------------------------------------------+
+ //
+ //
+  // The constructor asserts m_num_ports <= 2048, so the system is capped at
+  // 2048 components (the 15-bit PortID field leaves headroom) and
+  // 2^48 ~= 3x10^14 requests per component
+
+ int64_t id = (static_cast<uint64_t>(m_port_id) << 48) | m_request_cnt;
+ m_request_cnt++;
+ // assert((m_request_cnt & (1<<48)) == 0);
+ return id;
+ }
+
+private:
+ static uint16_t m_num_ports;
+ uint16_t m_port_id;
+ uint64_t m_request_cnt;
+};
+
+#endif
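makeUniqueRequestID() above packs the port ID into the upper bits and a per-port counter into the low 48 bits, so multiple ports can generate IDs concurrently without collisions. A small standalone check of that packing, with illustrative values:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint16_t port_id = 3;
      uint64_t request_cnt = 42;

      // Same packing as makeUniqueRequestID().
      int64_t id = (static_cast<uint64_t>(port_id) << 48) | request_cnt;

      // Both fields can be recovered from the packed ID.
      assert((id >> 48) == port_id);
      assert((uint64_t)(id & ((1LL << 48) - 1)) == request_cnt);
      return 0;
    }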
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 82eef2901..ff5ce1506 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -27,909 +27,424 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
- *
- */
-
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-//#include "mem/ruby/recorder/Tracer.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
-#include "mem/protocol/Chip.hh"
-#include "mem/ruby/tester/Tester.hh"
+#include "mem/protocol/CacheMsg.hh"
+#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/gems_common/Map.hh"
-#include "mem/packet.hh"
+#include "mem/ruby/buffers/MessageBuffer.hh"
+#include "mem/ruby/slicc_interface/AbstractController.hh"
-Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
- m_chip_ptr = chip_ptr;
- m_version = version;
+//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)
+Sequencer::Sequencer(const string & name)
+ :RubyPort(name)
+{
+}
+
+void Sequencer::init(const vector<string> & argv)
+{
m_deadlock_check_scheduled = false;
m_outstanding_count = 0;
- int smt_threads = RubyConfig::numberofSMTThreads();
- m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
- m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
-
- m_packetTable_ptr = new Map<Address, Packet*>;
-
- for(int p=0; p < smt_threads; ++p){
- m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
- m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+ m_max_outstanding_requests = 0;
+ m_deadlock_threshold = 0;
+ m_version = -1;
+ m_instCache_ptr = NULL;
+ m_dataCache_ptr = NULL;
+ m_controller = NULL;
+ for (size_t i=0; i<argv.size(); i+=2) {
+ if ( argv[i] == "controller") {
+ m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
+ m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+ } else if ( argv[i] == "icache")
+ m_instCache_ptr = RubySystem::getCache(argv[i+1]);
+ else if ( argv[i] == "dcache")
+ m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
+ else if ( argv[i] == "version")
+ m_version = atoi(argv[i+1].c_str());
+ else if ( argv[i] == "max_outstanding_requests")
+ m_max_outstanding_requests = atoi(argv[i+1].c_str());
+ else if ( argv[i] == "deadlock_threshold")
+ m_deadlock_threshold = atoi(argv[i+1].c_str());
+ else {
+ cerr << "WARNING: Sequencer: Unkown configuration parameter: " << argv[i] << endl;
+ assert(false);
+ }
}
-
+ assert(m_max_outstanding_requests > 0);
+ assert(m_deadlock_threshold > 0);
+ assert(m_version > -1);
+ assert(m_instCache_ptr != NULL);
+ assert(m_dataCache_ptr != NULL);
+ assert(m_controller != NULL);
}
Sequencer::~Sequencer() {
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int i=0; i < smt_threads; ++i){
- if(m_writeRequestTable_ptr[i]){
- delete m_writeRequestTable_ptr[i];
- }
- if(m_readRequestTable_ptr[i]){
- delete m_readRequestTable_ptr[i];
- }
- }
- if(m_writeRequestTable_ptr){
- delete [] m_writeRequestTable_ptr;
- }
- if(m_readRequestTable_ptr){
- delete [] m_readRequestTable_ptr;
- }
+
}
void Sequencer::wakeup() {
// Check for deadlock of any of the requests
Time current_time = g_eventQueue_ptr->getTime();
- bool deadlock = false;
// Check across all outstanding requests
- int smt_threads = RubyConfig::numberofSMTThreads();
int total_outstanding = 0;
- for(int p=0; p < smt_threads; ++p){
- Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
- for (int i=0; i<keys.size(); i++) {
- CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
- if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
- WARN_MSG("Possible Deadlock detected");
- WARN_EXPR(request);
- WARN_EXPR(m_chip_ptr->getID());
- WARN_EXPR(m_version);
- WARN_EXPR(keys.size());
- WARN_EXPR(current_time);
- WARN_EXPR(request.getTime());
- WARN_EXPR(current_time - request.getTime());
- WARN_EXPR(*m_readRequestTable_ptr[p]);
- ERROR_MSG("Aborting");
- deadlock = true;
- }
- }
- keys = m_writeRequestTable_ptr[p]->keys();
- for (int i=0; i<keys.size(); i++) {
- CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
- if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
- WARN_MSG("Possible Deadlock detected");
- WARN_EXPR(request);
- WARN_EXPR(m_chip_ptr->getID());
- WARN_EXPR(m_version);
- WARN_EXPR(current_time);
- WARN_EXPR(request.getTime());
- WARN_EXPR(current_time - request.getTime());
- WARN_EXPR(keys.size());
- WARN_EXPR(*m_writeRequestTable_ptr[p]);
- ERROR_MSG("Aborting");
- deadlock = true;
- }
- }
- total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
- } // across all request tables
+ Vector<Address> keys = m_readRequestTable.keys();
+ for (int i=0; i<keys.size(); i++) {
+ SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
+ if (current_time - request->issue_time >= m_deadlock_threshold) {
+ WARN_MSG("Possible Deadlock detected");
+ WARN_EXPR(request);
+ WARN_EXPR(m_version);
+ WARN_EXPR(keys.size());
+ WARN_EXPR(current_time);
+ WARN_EXPR(request->issue_time);
+ WARN_EXPR(current_time - request->issue_time);
+ ERROR_MSG("Aborting");
+ }
+ }
+
+ keys = m_writeRequestTable.keys();
+ for (int i=0; i<keys.size(); i++) {
+ SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
+ if (current_time - request->issue_time >= m_deadlock_threshold) {
+ WARN_MSG("Possible Deadlock detected");
+ WARN_EXPR(request);
+ WARN_EXPR(m_version);
+ WARN_EXPR(current_time);
+ WARN_EXPR(request->issue_time);
+ WARN_EXPR(current_time - request->issue_time);
+ WARN_EXPR(keys.size());
+ ERROR_MSG("Aborting");
+ }
+ }
+ total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
+
assert(m_outstanding_count == total_outstanding);
if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
} else {
m_deadlock_check_scheduled = false;
}
}
-//returns the total number of requests
-int Sequencer::getNumberOutstanding(){
- return m_outstanding_count;
-}
-
-// returns the total number of demand requests
-int Sequencer::getNumberOutstandingDemand(){
- int smt_threads = RubyConfig::numberofSMTThreads();
- int total_demand = 0;
- for(int p=0; p < smt_threads; ++p){
- Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
- for (int i=0; i< keys.size(); i++) {
- CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
- if(request.getPrefetch() == PrefetchBit_No){
- total_demand++;
- }
- }
-
- keys = m_writeRequestTable_ptr[p]->keys();
- for (int i=0; i< keys.size(); i++) {
- CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
- if(request.getPrefetch() == PrefetchBit_No){
- total_demand++;
- }
- }
- }
-
- return total_demand;
-}
-
-int Sequencer::getNumberOutstandingPrefetch(){
- int smt_threads = RubyConfig::numberofSMTThreads();
- int total_prefetch = 0;
- for(int p=0; p < smt_threads; ++p){
- Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
- for (int i=0; i< keys.size(); i++) {
- CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
- if(request.getPrefetch() == PrefetchBit_Yes){
- total_prefetch++;
- }
- }
-
- keys = m_writeRequestTable_ptr[p]->keys();
- for (int i=0; i< keys.size(); i++) {
- CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
- if(request.getPrefetch() == PrefetchBit_Yes){
- total_prefetch++;
- }
- }
- }
-
- return total_prefetch;
-}
-
-bool Sequencer::isPrefetchRequest(const Address & lineaddr){
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int p=0; p < smt_threads; ++p){
- // check load requests
- Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
- for (int i=0; i< keys.size(); i++) {
- CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
- if(line_address(request.getAddress()) == lineaddr){
- if(request.getPrefetch() == PrefetchBit_Yes){
- return true;
- }
- else{
- return false;
- }
- }
- }
-
- // check store requests
- keys = m_writeRequestTable_ptr[p]->keys();
- for (int i=0; i< keys.size(); i++) {
- CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
- if(line_address(request.getAddress()) == lineaddr){
- if(request.getPrefetch() == PrefetchBit_Yes){
- return true;
- }
- else{
- return false;
- }
- }
- }
- }
- // we should've found a matching request
- cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl;
- printProgress(cout);
- assert(0);
-}
-
-AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
- if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
- CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
- return request.getAccessMode();
- } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
- CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
- return request.getAccessMode();
- } else {
- printProgress(cout);
- ERROR_MSG("Request not found in RequestTables");
- }
-}
-
-Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
- assert(thread >= 0);
- if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
- CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
- return request.getLogicalAddress();
- } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
- CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
- return request.getLogicalAddress();
- } else {
- printProgress(cout);
- WARN_MSG("Request not found in RequestTables");
- WARN_MSG(addr);
- WARN_MSG(thread);
- ASSERT(0);
- }
-}
-
-// returns the ThreadID of the request
-int Sequencer::getRequestThreadID(const Address & addr){
- int smt_threads = RubyConfig::numberofSMTThreads();
- int thread = -1;
- int num_found = 0;
- for(int p=0; p < smt_threads; ++p){
- if(m_readRequestTable_ptr[p]->exist(addr)){
- num_found++;
- thread = p;
- }
- if(m_writeRequestTable_ptr[p]->exist(addr)){
- num_found++;
- thread = p;
- }
- }
- if(num_found != 1){
- cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
- printProgress(cout);
- }
- ASSERT(num_found == 1);
- ASSERT(thread != -1);
-
- return thread;
-}
-
-// given a line address, return the request's physical address
-Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
- int smt_threads = RubyConfig::numberofSMTThreads();
- Address physaddr;
- int num_found = 0;
- for(int p=0; p < smt_threads; ++p){
- if(m_readRequestTable_ptr[p]->exist(lineaddr)){
- num_found++;
- physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
- }
- if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
- num_found++;
- physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
- }
- }
- if(num_found != 1){
- cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
- printProgress(cout);
- }
- ASSERT(num_found == 1);
-
- return physaddr;
-}
-
void Sequencer::printProgress(ostream& out) const{
-
+ /*
int total_demand = 0;
out << "Sequencer Stats Version " << m_version << endl;
out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
out << "---------------" << endl;
out << "outstanding requests" << endl;
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int p=0; p < smt_threads; ++p){
- Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
- int read_size = rkeys.size();
- out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
- // print the request table
- for(int i=0; i < read_size; ++i){
- CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
- out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
- if( request.getPrefetch() == PrefetchBit_No ){
- total_demand++;
- }
- }
-
- Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
- int write_size = wkeys.size();
- out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
- // print the request table
- for(int i=0; i < write_size; ++i){
- CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
+ Vector<Address> rkeys = m_readRequestTable.keys();
+ int read_size = rkeys.size();
+ out << "proc " << m_version << " Read Requests = " << read_size << endl;
+ // print the request table
+ for(int i=0; i < read_size; ++i){
+ SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
+ out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
+ total_demand++;
+ }
+
+ Vector<Address> wkeys = m_writeRequestTable.keys();
+ int write_size = wkeys.size();
+ out << "proc " << m_version << " Write Requests = " << write_size << endl;
+ // print the request table
+ for(int i=0; i < write_size; ++i){
+ CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
if( request.getPrefetch() == PrefetchBit_No ){
total_demand++;
}
- }
-
- out << endl;
}
+
+ out << endl;
+
out << "Total Number Outstanding: " << m_outstanding_count << endl;
out << "Total Number Demand : " << total_demand << endl;
out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
out << endl;
out << endl;
-
+ */
}
-void Sequencer::printConfig(ostream& out) {
- if (TSO) {
- out << "sequencer: Sequencer - TSO" << endl;
- } else {
- out << "sequencer: Sequencer - SC" << endl;
- }
- out << " max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
-}
-
-bool Sequencer::empty() const {
- return m_outstanding_count == 0;
+void Sequencer::printConfig(ostream& out) const {
+ out << "Sequencer config: " << m_name << endl;
+ out << " controller: " << m_controller->getName() << endl;
+ out << " version: " << m_version << endl;
+ out << " max_outstanding_requests: " << m_max_outstanding_requests << endl;
+ out << " deadlock_threshold: " << m_deadlock_threshold << endl;
}
// Insert the request on the correct request table. Return true if
// the entry was already present.
-bool Sequencer::insertRequest(const CacheMsg& request) {
- int thread = request.getThreadID();
- assert(thread >= 0);
- int total_outstanding = 0;
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int p=0; p < smt_threads; ++p){
- total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
- }
+bool Sequencer::insertRequest(SequencerRequest* request) {
+ int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
+
assert(m_outstanding_count == total_outstanding);
// See if we should schedule a deadlock check
if (m_deadlock_check_scheduled == false) {
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
m_deadlock_check_scheduled = true;
}
- if ((request.getType() == CacheRequestType_ST) ||
- (request.getType() == CacheRequestType_ATOMIC)) {
- if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
- m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
- return true;
+ Address line_addr(request->ruby_request.paddr);
+ line_addr.makeLineAddress();
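+ // Requests are tracked by cache-line address, so at most one read and one
+ // write/RMW may be outstanding per line at any time.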
+ if ((request->ruby_request.type == RubyRequestType_ST) ||
+ (request->ruby_request.type == RubyRequestType_RMW)) {
+ if (m_writeRequestTable.exist(line_addr)) {
+ m_writeRequestTable.lookup(line_addr) = request;
+ // return true;
+ assert(0); // drh5: isn't this an error? do you lose the initial request?
}
- m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
- m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+ m_writeRequestTable.allocate(line_addr);
+ m_writeRequestTable.lookup(line_addr) = request;
m_outstanding_count++;
} else {
- if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
- m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
- return true;
+ if (m_readRequestTable.exist(line_addr)) {
+ m_readRequestTable.lookup(line_addr) = request;
+ // return true;
+ assert(0); // drh5: isn't this an error? do you lose the initial request?
}
- m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
- m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+ m_readRequestTable.allocate(line_addr);
+ m_readRequestTable.lookup(line_addr) = request;
m_outstanding_count++;
}
g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
- total_outstanding = 0;
- for(int p=0; p < smt_threads; ++p){
- total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
- }
-
+ total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
+
return false;
}
-void Sequencer::removeRequest(const CacheMsg& request) {
- int thread = request.getThreadID();
- assert(thread >= 0);
- int total_outstanding = 0;
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int p=0; p < smt_threads; ++p){
- total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
- }
- assert(m_outstanding_count == total_outstanding);
+void Sequencer::removeRequest(SequencerRequest* srequest) {
+
+ assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
- if ((request.getType() == CacheRequestType_ST) ||
- (request.getType() == CacheRequestType_ATOMIC)) {
- m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+ const RubyRequest & ruby_request = srequest->ruby_request;
+ Address line_addr(ruby_request.paddr);
+ line_addr.makeLineAddress();
+ if ((ruby_request.type == RubyRequestType_ST) ||
+ (ruby_request.type == RubyRequestType_RMW)) {
+ m_writeRequestTable.deallocate(line_addr);
} else {
- m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+ m_readRequestTable.deallocate(line_addr);
}
m_outstanding_count--;
- total_outstanding = 0;
- for(int p=0; p < smt_threads; ++p){
- total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
- }
- assert(m_outstanding_count == total_outstanding);
-}
-
-void Sequencer::writeCallback(const Address& address) {
- DataBlock data;
- writeCallback(address, data);
+ assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}
void Sequencer::writeCallback(const Address& address, DataBlock& data) {
- // process oldest thread first
- int thread = -1;
- Time oldest_time = 0;
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int t=0; t < smt_threads; ++t){
- if(m_writeRequestTable_ptr[t]->exist(address)){
- CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
- if(thread == -1 || (request.getTime() < oldest_time) ){
- thread = t;
- oldest_time = request.getTime();
- }
- }
- }
- // make sure we found an oldest thread
- ASSERT(thread != -1);
-
- CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
-
- writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
-}
-
-void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
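+ // Called by the cache controller when a store or atomic (RMW) to this line completes.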
assert(address == line_address(address));
- assert(thread >= 0);
- assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+ assert(m_writeRequestTable.exist(line_address(address)));
- writeCallback(address, data, respondingMach, thread);
-
-}
-
-void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
- assert(address == line_address(address));
- assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
- CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
- assert( request.getThreadID() == thread);
+ SequencerRequest* request = m_writeRequestTable.lookup(address);
removeRequest(request);
- assert((request.getType() == CacheRequestType_ST) ||
- (request.getType() == CacheRequestType_ATOMIC));
-
- hitCallback(request, data, respondingMach, thread);
+ assert((request->ruby_request.type == RubyRequestType_ST) ||
+ (request->ruby_request.type == RubyRequestType_RMW));
-}
-
-void Sequencer::readCallback(const Address& address) {
- DataBlock data;
- readCallback(address, data);
+ hitCallback(request, data);
}
void Sequencer::readCallback(const Address& address, DataBlock& data) {
- // process oldest thread first
- int thread = -1;
- Time oldest_time = 0;
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int t=0; t < smt_threads; ++t){
- if(m_readRequestTable_ptr[t]->exist(address)){
- CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
- if(thread == -1 || (request.getTime() < oldest_time) ){
- thread = t;
- oldest_time = request.getTime();
- }
- }
- }
- // make sure we found an oldest thread
- ASSERT(thread != -1);
-
- CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
-
- readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
-}
-
-void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
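+ // Called by the cache controller when a load or instruction fetch for this line completes.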
assert(address == line_address(address));
- assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+ assert(m_readRequestTable.exist(line_address(address)));
- readCallback(address, data, respondingMach, thread);
-}
-
-void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
- assert(address == line_address(address));
- assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
-
- CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
- assert( request.getThreadID() == thread );
+ SequencerRequest* request = m_readRequestTable.lookup(address);
removeRequest(request);
- assert((request.getType() == CacheRequestType_LD) ||
- (request.getType() == CacheRequestType_IFETCH)
- );
+ assert((request->ruby_request.type == RubyRequestType_LD) ||
+ (request->ruby_request.type == RubyRequestType_IFETCH));
- hitCallback(request, data, respondingMach, thread);
+ hitCallback(request, data);
}
-void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
- int size = request.getSize();
- Address request_address = request.getAddress();
- Address request_logical_address = request.getLogicalAddress();
- Address request_line_address = line_address(request_address);
- CacheRequestType type = request.getType();
- int threadID = request.getThreadID();
- Time issued_time = request.getTime();
- int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
-
- DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
+ const RubyRequest & ruby_request = srequest->ruby_request;
+ int size = ruby_request.len;
+ Address request_address(ruby_request.paddr);
+ Address request_line_address(ruby_request.paddr);
+ request_line_address.makeLineAddress();
+ RubyRequestType type = ruby_request.type;
+ Time issued_time = srequest->issue_time;
// Set this cache entry to the most recently used
- if (type == CacheRequestType_IFETCH) {
- if (Protocol::m_TwoLevelCache) {
- if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
- m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
- }
- }
- else {
- if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
- m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
- }
- }
+ if (type == RubyRequestType_IFETCH) {
+ if (m_instCache_ptr->isTagPresent(request_line_address) )
+ m_instCache_ptr->setMRU(request_line_address);
} else {
- if (Protocol::m_TwoLevelCache) {
- if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
- m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
- }
- }
- else {
- if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
- m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
- }
- }
+ if (m_dataCache_ptr->isTagPresent(request_line_address) )
+ m_dataCache_ptr->setMRU(request_line_address);
}
assert(g_eventQueue_ptr->getTime() >= issued_time);
Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
- if (PROTOCOL_DEBUG_TRACE) {
- g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
- int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
- }
-
- DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
- DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
- if (request.getPrefetch() == PrefetchBit_Yes) {
- DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
- g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
- return; // Ignore the software prefetch, don't callback the driver
- }
-
// Profile the miss latency for all non-zero demand misses
if (miss_latency != 0) {
- g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
-
- }
-
- bool write =
- (type == CacheRequestType_ST) ||
- (type == CacheRequestType_ATOMIC);
+ g_system_ptr->getProfiler()->missLatency(miss_latency, type);
- if (TSO && write) {
- m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data,
- m_packetTable_ptr->lookup(request.getAddress()));
- } else {
-
- // Copy the correct bytes out of the cache line into the subblock
- SubBlock subblock(request_address, request_logical_address, size);
- subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock
-
- // Scan the store buffer to see if there are any outstanding stores we need to collect
- if (TSO) {
- m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
+ if (Debug::getProtocolTrace()) {
+ g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
+ "", "Done", "", int_to_string(miss_latency)+" cycles");
}
+ }
+ /*
+ if (request.getPrefetch() == PrefetchBit_Yes) {
+ return; // Ignore the prefetch
+ }
+ */
- // Call into the Driver and let it read and/or modify the sub-block
- Packet* pkt = m_packetTable_ptr->lookup(request.getAddress());
-
- // update data if this is a store/atomic
-
- /*
- if (pkt->req->isCondSwap()) {
- L1Cache_Entry entry = m_L1Cache_vec[m_version]->lookup(Address(pkt->req->physAddr()));
- DataBlk datablk = entry->getDataBlk();
- uint8_t *orig_data = datablk.getArray();
- if ( datablk.equal(pkt->req->getExtraData()) )
- datablk->setArray(pkt->getData());
- pkt->setData(orig_data);
- }
- */
-
- g_system_ptr->getDriver()->hitCallback(pkt);
- m_packetTable_ptr->remove(request.getAddress());
-
- // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
- // (This is only triggered for the non-TSO case)
- if (write) {
- assert(!TSO);
- subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock
+ // Update the data: loads and ifetches copy bytes out of the cache block into the
+ // requester's buffer; stores and RMWs copy the requester's bytes into the block.
+ if (ruby_request.data != NULL) {
+ if ((type == RubyRequestType_LD) ||
+ (type == RubyRequestType_IFETCH)) {
+ memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
+ } else {
+ data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
}
}
-}
-void Sequencer::printDebug(){
- //notify driver of debug
- g_system_ptr->getDriver()->printDebug();
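+ // Notify the requester that this request has completed, then free the bookkeeping entry.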
+ m_hit_callback(srequest->id);
+ delete srequest;
}
-//dsm: breaks build, delayed
// Returns true if the sequencer already has a load or store outstanding
-bool
-Sequencer::isReady(const Packet* pkt) const
-{
-
- int cpu_number = pkt->req->contextId();
- la_t logical_addr = pkt->req->getVaddr();
- pa_t physical_addr = pkt->req->getPaddr();
- CacheRequestType type_of_request;
- if ( pkt->req->isInstFetch() ) {
- type_of_request = CacheRequestType_IFETCH;
- } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
- type_of_request = CacheRequestType_ATOMIC;
- } else if ( pkt->isRead() ) {
- type_of_request = CacheRequestType_LD;
- } else if ( pkt->isWrite() ) {
- type_of_request = CacheRequestType_ST;
- } else {
- assert(false);
+bool Sequencer::isReady(const RubyRequest& request) const {
+ // POLINA: check whether we are currently flushing the write buffer; if so, Ruby
+ // reports itself as not ready, to simulate stalling of the front-end.
+ // Do we stall all the sequencers? If it is an atomic instruction - yes!
+ if (m_outstanding_count >= m_max_outstanding_requests) {
+ return false;
}
- int thread = pkt->req->threadId();
-
- CacheMsg request(Address( physical_addr ),
- Address( physical_addr ),
- type_of_request,
- Address(0),
- AccessModeType_UserMode, // User/supervisor mode
- 0, // Size in bytes of request
- PrefetchBit_No, // Not a prefetch
- 0, // Version number
- Address(logical_addr), // Virtual Address
- thread // SMT thread
- );
- return isReady(request);
-}
-bool
-Sequencer::isReady(const CacheMsg& request) const
-{
- if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
- //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
+ if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
+ m_readRequestTable.exist(line_address(Address(request.paddr))) ){
+ //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
//printProgress(cout);
return false;
}
- // This code allows reads to be performed even when we have a write
- // request outstanding for the line
- bool write =
- (request.getType() == CacheRequestType_ST) ||
- (request.getType() == CacheRequestType_ATOMIC);
-
- // LUKE - disallow more than one request type per address
- // INVARIANT: at most one request type per address, per processor
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int p=0; p < smt_threads; ++p){
- if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
- m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
- //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
- //printProgress(cout);
- return false;
- }
- }
-
- if (TSO) {
- return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
- }
return true;
}
-//dsm: breaks build, delayed
-// Called by Driver (Simics or Tester).
-void
-Sequencer::makeRequest(Packet* pkt)
-{
- int cpu_number = pkt->req->contextId();
- la_t logical_addr = pkt->req->getVaddr();
- pa_t physical_addr = pkt->req->getPaddr();
- int request_size = pkt->getSize();
- CacheRequestType type_of_request;
- PrefetchBit prefetch;
- bool write = false;
- if ( pkt->req->isInstFetch() ) {
- type_of_request = CacheRequestType_IFETCH;
- } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
- type_of_request = CacheRequestType_ATOMIC;
- write = true;
- } else if ( pkt->isRead() ) {
- type_of_request = CacheRequestType_LD;
- } else if ( pkt->isWrite() ) {
- type_of_request = CacheRequestType_ST;
- write = true;
- } else {
- assert(false);
- }
- if (pkt->req->isPrefetch()) {
- prefetch = PrefetchBit_Yes;
- } else {
- prefetch = PrefetchBit_No;
- }
- la_t virtual_pc = pkt->req->getPC();
- int isPriv = false; // TODO: get permission data
- int thread = pkt->req->threadId();
-
- AccessModeType access_mode = AccessModeType_UserMode; // TODO: get actual permission
-
- CacheMsg request(Address( physical_addr ),
- Address( physical_addr ),
- type_of_request,
- Address(virtual_pc),
- access_mode, // User/supervisor mode
- request_size, // Size in bytes of request
- prefetch,
- 0, // Version number
- Address(logical_addr), // Virtual Address
- thread // SMT thread
- );
-
- if ( TSO && write && !pkt->req->isPrefetch() ) {
- assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
- m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(pkt, request);
- return;
- }
-
- m_packetTable_ptr->insert(Address( physical_addr ), pkt);
-
- doRequest(request);
-}
-
-bool Sequencer::doRequest(const CacheMsg& request) {
- bool hit = false;
- // Check the fast path
- DataBlock* data_ptr;
-
- int thread = request.getThreadID();
-
- hit = tryCacheAccess(line_address(request.getAddress()),
- request.getType(),
- request.getProgramCounter(),
- request.getAccessMode(),
- request.getSize(),
- data_ptr);
-
- if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
- DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
- hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
- return true;
- }
-
- if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
-
- // See if we can satisfy the load entirely from the store buffer
- SubBlock subblock(line_address(request.getAddress()), request.getSize());
- if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
- DataBlock dummy;
- hitCallback(request, dummy, GenericMachineType_NULL, thread); // Call with an 'empty' datablock, since the data is in the store buffer
- return true;
- }
- }
-
- DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
- issueRequest(request);
- return hit;
+bool Sequencer::empty() const {
+ return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}
-void Sequencer::issueRequest(const CacheMsg& request) {
- bool found = insertRequest(request);
-
- if (!found) {
- CacheMsg msg = request;
- msg.getAddress() = line_address(request.getAddress()); // Make line address
-
- // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
- if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
- g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
- }
-
- if (PROTOCOL_DEBUG_TRACE) {
- g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", CacheRequestType_to_string(request.getType()));
- }
-
-#if 0
- // Commented out by nate binkert because I removed the trace stuff
- if (g_system_ptr->getTracer()->traceEnabled()) {
- g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
- msg.getType(), g_eventQueue_ptr->getTime());
- }
-#endif
-
- Time latency = 0; // initialzed to an null value
-
- latency = SEQUENCER_TO_CONTROLLER_LATENCY;
-
- // Send the message to the cache controller
- assert(latency > 0);
- m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);
-
- } // !found
+int64_t Sequencer::makeRequest(const RubyRequest & request)
+{
+ assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
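+ // Accept the request only if the sequencer has room and no request to the same
+ // cache line is already outstanding; otherwise return -1 to signal it was not accepted.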
+ if (isReady(request)) {
+ int64_t id = makeUniqueRequestID();
+ SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
+ bool found = insertRequest(srequest);
+ if (!found)
+ issueRequest(request);
+
+ // TODO: issue hardware prefetches here
+ return id;
+ }
+ else {
+ return -1;
+ }
+}
+
+void Sequencer::issueRequest(const RubyRequest& request) {
+
+ // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, and have SLICC use RubyRequest and subtypes natively
+ CacheRequestType ctype;
+ switch(request.type) {
+ case RubyRequestType_IFETCH:
+ ctype = CacheRequestType_IFETCH;
+ break;
+ case RubyRequestType_LD:
+ ctype = CacheRequestType_LD;
+ break;
+ case RubyRequestType_ST:
+ ctype = CacheRequestType_ST;
+ break;
+ case RubyRequestType_RMW:
+ ctype = CacheRequestType_ATOMIC;
+ break;
+ default:
+ assert(0);
+ }
+ AccessModeType amtype;
+ switch(request.access_mode){
+ case RubyAccessMode_User:
+ amtype = AccessModeType_UserMode;
+ break;
+ case RubyAccessMode_Supervisor:
+ amtype = AccessModeType_SupervisorMode;
+ break;
+ case RubyAccessMode_Device:
+ amtype = AccessModeType_UserMode;
+ break;
+ default:
+ assert(0);
+ }
+ Address line_addr(request.paddr);
+ line_addr.makeLineAddress();
+ CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);
+
+ if (Debug::getProtocolTrace()) {
+ g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
+ "", "Begin", "", RubyRequestType_to_string(request.type));
+ }
+
+ if (g_system_ptr->getTracer()->traceEnabled()) {
+ g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
+ request.type, g_eventQueue_ptr->getTime());
+ }
+
+ Time latency = 0; // initialized to a null value
+
+ if (request.type == RubyRequestType_IFETCH)
+ latency = m_instCache_ptr->getLatency();
+ else
+ latency = m_dataCache_ptr->getLatency();
+
+ // Send the message to the cache controller
+ assert(latency > 0);
+
+
+ m_mandatory_q_ptr->enqueue(msg, latency);
}
-
+/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
- const Address& pc, AccessModeType access_mode,
+ AccessModeType access_mode,
int size, DataBlock*& data_ptr) {
if (type == CacheRequestType_IFETCH) {
- if (Protocol::m_TwoLevelCache) {
- return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
- }
- else {
- return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
- }
+ return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
} else {
- if (Protocol::m_TwoLevelCache) {
- return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
- }
- else {
- return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
- }
- }
-}
-
-void Sequencer::resetRequestTime(const Address& addr, int thread){
- assert(thread >= 0);
- //reset both load and store requests, if they exist
- if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
- CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
- if( request.m_AccessMode != AccessModeType_UserMode){
- cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
- printProgress(cout);
- }
- //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
- request.setTime(g_eventQueue_ptr->getTime());
+ return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
}
- if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
- CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
- if( request.m_AccessMode != AccessModeType_UserMode){
- cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
- printProgress(cout);
- }
- //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
- request.setTime(g_eventQueue_ptr->getTime());
- }
-}
-
-// removes load request from queue
-void Sequencer::removeLoadRequest(const Address & addr, int thread){
- removeRequest(getReadRequest(addr, thread));
-}
-
-void Sequencer::removeStoreRequest(const Address & addr, int thread){
- removeRequest(getWriteRequest(addr, thread));
-}
-
-// returns the read CacheMsg
-CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
- Address temp = addr;
- assert(thread >= 0);
- assert(temp == line_address(temp));
- assert(m_readRequestTable_ptr[thread]->exist(addr));
- return m_readRequestTable_ptr[thread]->lookup(addr);
-}
-
-CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
- Address temp = addr;
- assert(thread >= 0);
- assert(temp == line_address(temp));
- assert(m_writeRequestTable_ptr[thread]->exist(addr));
- return m_writeRequestTable_ptr[thread]->lookup(addr);
}
+*/
void Sequencer::print(ostream& out) const {
- out << "[Sequencer: " << m_chip_ptr->getID()
+ out << "[Sequencer: " << m_version
<< ", outstanding requests: " << m_outstanding_count;
- int smt_threads = RubyConfig::numberofSMTThreads();
- for(int p=0; p < smt_threads; ++p){
- out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
- << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
- }
+ out << ", read request table: " << m_readRequestTable
+ << ", write request table: " << m_writeRequestTable;
out << "]";
}
@@ -941,20 +456,183 @@ void Sequencer::checkCoherence(const Address& addr) {
#endif
}
+/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
- unsigned int size_in_bytes ) {
- for(unsigned int i=0; i < size_in_bytes; i++) {
- std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented. " << std::endl;
- value[i] = 0; // _read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
- // addr.getAddress() + i, 1 );
- }
- return false; // Do nothing?
+ unsigned int size_in_bytes )
+{
+ bool found = false;
+ const Address lineAddr = line_address(addr);
+ DataBlock data;
+ PhysAddress paddr(addr);
+ DataBlock* dataPtr = &data;
+
+ MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
+ int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
+
+ if (Protocol::m_TwoLevelCache) {
+ if(Protocol::m_CMP){
+ assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
+ }
+ else{
+ assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
+ }
+ }
+
+ if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
+ n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
+ found = true;
+ } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+ n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
+ found = true;
+ } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+ n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
+ found = true;
+ // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
+// ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
+// L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
+
+// int offset = addr.getOffset();
+// for(int i=0; i<size_in_bytes; ++i){
+// value[i] = tbeEntry.getDataBlk().getByte(offset + i);
+// }
+
+// found = true;
+ } else {
+ // Address not found
+ //cout << " " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
+ n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
+ int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ int offset = addr.getOffset();
+ value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
+ }
+ // Address not found
+ //WARN_MSG("Couldn't find address");
+ //WARN_EXPR(addr);
+ found = false;
+ }
+ return true;
}
bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
unsigned int size_in_bytes) {
char test_buffer[64];
- return false; // Do nothing?
+ // the idea here is that the coherent cache should find the
+ // latest data, then update it
+ bool found = false;
+ const Address lineAddr = line_address(addr);
+ PhysAddress paddr(addr);
+ DataBlock data;
+ DataBlock* dataPtr = &data;
+ Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
+
+ MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
+ int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
+
+ assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
+ assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
+ if (Protocol::m_TwoLevelCache) {
+ if(Protocol::m_CMP){
+ assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
+ }
+ else{
+ assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
+ }
+ }
+
+ if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
+ n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
+ found = true;
+ } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+ n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
+ found = true;
+ } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+ n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
+ found = true;
+ } else {
+ // Address not found
+ n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
+ int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ int offset = addr.getOffset();
+ n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
+ }
+ found = false;
+ }
+
+ if (found){
+ found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
+ assert(found);
+ if(value[0] != test_buffer[0]){
+ WARN_EXPR((int) value[0]);
+ WARN_EXPR((int) test_buffer[0]);
+ ERROR_MSG("setRubyMemoryValue failed to set value.");
+ }
+ }
+
+ return true;
+}
+*/
+/*
+
+void
+Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
+{
+ if ( type == AccessType_Read || type == AccessType_Write ) {
+ // need to break up the packet data
+ uint64 guest_ptr = paddr;
+ Vector<DataBlock*> datablocks;
+ while (paddr + len != guest_ptr) {
+ Address addr(guest_ptr);
+ Address line_addr = line_address(addr);
+
+ int bytes_copied;
+ if (addr.getOffset() == 0) {
+ bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
+ (paddr + len - guest_ptr):
+ RubyConfig::dataBlockBytes();
+ } else {
+ bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
+ if (guest_ptr + bytes_copied > paddr + len)
+ bytes_copied = paddr + len - guest_ptr;
+ }
+
+ // first we need to find all data blocks that have to be updated for a write
+ // and the highest block for a read
+ for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
+ if (Protocol::m_TwoLevelCache){
+ if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
+ datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
+ if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
+ datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
+ } else {
+ if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
+ datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
+ }
+ }
+ if (Protocol::m_TwoLevelCache){
+ int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
+ if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
+ datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
+ }
+ }
+ assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
+ DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
+ Directory_Entry& entry = dir->lookup(line_addr);
+ datablocks.insertAtBottom(&entry.getDataBlk());
+
+ if (pkt->isRead()){
+ datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
+ } else {// pkt->isWrite() {
+ for (int i=0;i<datablocks.size();i++)
+ datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
+ }
+
+ guest_ptr += bytes_copied;
+ pkt_data += bytes_copied;
+ datablocks.clear();
+ }
}
+*/
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
index 1ccfd97ce..254f5a092 100644
--- a/src/mem/ruby/system/Sequencer.hh
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -44,19 +44,31 @@
#include "mem/protocol/AccessModeType.hh"
#include "mem/protocol/GenericMachineType.hh"
#include "mem/protocol/PrefetchBit.hh"
+#include "mem/ruby/system/RubyPort.hh"
#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
class DataBlock;
-class AbstractChip;
class CacheMsg;
-class Address;
class MachineID;
-class Packet;
+class CacheMemory;
+class AbstractController;
-class Sequencer : public Consumer {
+struct SequencerRequest {
+ RubyRequest ruby_request;
+ int64_t id;
+ Time issue_time;
+
+ SequencerRequest(const RubyRequest & _ruby_request, int64_t _id, Time _issue_time)
+ : ruby_request(_ruby_request), id(_id), issue_time(_issue_time)
+ {}
+};
+
+class Sequencer : public Consumer, public RubyPort {
public:
// Constructors
- Sequencer(AbstractChip* chip_ptr, int version);
+ Sequencer(const string & name);
+ void init(const vector<string> & argv);
// Destructor
~Sequencer();
@@ -64,87 +76,53 @@ public:
// Public Methods
void wakeup(); // Used only for deadlock detection
- static void printConfig(ostream& out);
-
- // returns total number of outstanding request (includes prefetches)
- int getNumberOutstanding();
- // return only total number of outstanding demand requests
- int getNumberOutstandingDemand();
- // return only total number of outstanding prefetch requests
- int getNumberOutstandingPrefetch();
-
- // remove load/store request from queue
- void removeLoadRequest(const Address & addr, int thread);
- void removeStoreRequest(const Address & addr, int thread);
+ void printConfig(ostream& out) const;
void printProgress(ostream& out) const;
- // returns a pointer to the request in the request tables
- CacheMsg & getReadRequest( const Address & addr, int thread );
- CacheMsg & getWriteRequest( const Address & addr, int thread );
-
void writeCallback(const Address& address, DataBlock& data);
void readCallback(const Address& address, DataBlock& data);
- void writeCallback(const Address& address);
- void readCallback(const Address& address);
- void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
- void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
- void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
- void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
-
- // returns the thread ID of the request
- int getRequestThreadID(const Address & addr);
- // returns the physical address of the request
- Address getRequestPhysicalAddress(const Address & lineaddr);
- // returns whether a request is a prefetch request
- bool isPrefetchRequest(const Address & lineaddr);
-
- //notifies driver of debug print
- void printDebug();
// called by Tester or Simics
- void makeRequest(Packet* pkt);
- bool doRequest(const CacheMsg& request);
- void issueRequest(const CacheMsg& request);
- bool isReady(const Packet* pkt) const;
- bool isReady(const CacheMsg& request) const; // depricate this function
+ int64_t makeRequest(const RubyRequest & request);
+ bool isReady(const RubyRequest& request) const;
bool empty() const;
- void resetRequestTime(const Address& addr, int thread);
- Address getLogicalAddressOfRequest(Address address, int thread);
- AccessModeType getAccessModeOfRequest(Address address, int thread);
- //uint64 getSequenceNumberOfRequest(Address addr, int thread);
void print(ostream& out) const;
void checkCoherence(const Address& address);
- bool getRubyMemoryValue(const Address& addr, char* value, unsigned int size_in_bytes);
- bool setRubyMemoryValue(const Address& addr, char *value, unsigned int size_in_bytes);
+ // bool getRubyMemoryValue(const Address& addr, char* value, unsigned int size_in_bytes);
+ // bool setRubyMemoryValue(const Address& addr, char *value, unsigned int size_in_bytes);
- void removeRequest(const CacheMsg& request);
+ void removeRequest(SequencerRequest* request);
private:
// Private Methods
bool tryCacheAccess(const Address& addr, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, DataBlock*& data_ptr);
- // void conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread);
- void hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread);
- bool insertRequest(const CacheMsg& request);
+ void issueRequest(const RubyRequest& request);
+
+ void hitCallback(SequencerRequest* request, DataBlock& data);
+ bool insertRequest(SequencerRequest* request);
// Private copy constructor and assignment operator
Sequencer(const Sequencer& obj);
Sequencer& operator=(const Sequencer& obj);
- // Data Members (m_ prefix)
- AbstractChip* m_chip_ptr;
+private:
+ int m_max_outstanding_requests;
+ int m_deadlock_threshold;
+
+ AbstractController* m_controller;
+ MessageBuffer* m_mandatory_q_ptr;
+ CacheMemory* m_dataCache_ptr;
+ CacheMemory* m_instCache_ptr;
// indicates what processor on the chip this sequencer is associated with
int m_version;
+ int m_controller_type;
- // One request table per SMT thread
- Map<Address, CacheMsg>** m_writeRequestTable_ptr;
- Map<Address, CacheMsg>** m_readRequestTable_ptr;
-
- Map<Address, Packet*>* m_packetTable_ptr;
-
+ Map<Address, SequencerRequest*> m_writeRequestTable;
+ Map<Address, SequencerRequest*> m_readRequestTable;
// Global outstanding request count, across all request tables
int m_outstanding_count;
bool m_deadlock_check_scheduled;
diff --git a/src/mem/ruby/system/StoreBuffer.cc b/src/mem/ruby/system/StoreBuffer.cc
deleted file mode 100644
index 280decdd8..000000000
--- a/src/mem/ruby/system/StoreBuffer.cc
+++ /dev/null
@@ -1,302 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/system/StoreBuffer.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/common/Driver.hh"
-#include "mem/gems_common/Vector.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/ruby/profiler/AddressProfiler.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/packet.hh"
-
-// *** Begin Helper class ***
-struct StoreBufferEntry {
- StoreBufferEntry() {} // So we can allocate a vector of StoreBufferEntries
- StoreBufferEntry(const SubBlock& block, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, int thread) : m_subblock(block) {
- m_type = type;
- m_pc = pc;
- m_access_mode = access_mode;
- m_size = size;
- m_thread = thread;
- m_time = g_eventQueue_ptr->getTime();
- }
-
- void print(ostream& out) const
- {
- out << "[StoreBufferEntry: "
- << "SubBlock: " << m_subblock
- << ", Type: " << m_type
- << ", PC: " << m_pc
- << ", AccessMode: " << m_access_mode
- << ", Size: " << m_size
- << ", Thread: " << m_thread
- << ", Time: " << m_time
- << "]";
- }
-
- SubBlock m_subblock;
- CacheRequestType m_type;
- Address m_pc;
- AccessModeType m_access_mode;
- int m_size;
- int m_thread;
- Time m_time;
-};
-
-extern inline
-ostream& operator<<(ostream& out, const StoreBufferEntry& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-// *** End Helper class ***
-
-const int MAX_ENTRIES = 128;
-
-static void inc_index(int& index)
-{
- index++;
- if (index >= MAX_ENTRIES) {
- index = 0;
- }
-}
-
-StoreBuffer::StoreBuffer(AbstractChip* chip_ptr, int version) :
- m_store_cache()
-{
- m_chip_ptr = chip_ptr;
- m_version = version;
- m_queue_ptr = new Vector<StoreBufferEntry>(MAX_ENTRIES);
- m_queue_ptr->setSize(MAX_ENTRIES);
- m_pending = false;
- m_seen_atomic = false;
- m_head = 0;
- m_tail = 0;
- m_size = 0;
- m_deadlock_check_scheduled = false;
-}
-
-StoreBuffer::~StoreBuffer()
-{
- delete m_queue_ptr;
-}
-
-// Used only to check for deadlock
-void StoreBuffer::wakeup()
-{
- // Check for deadlock of any of the requests
- Time current_time = g_eventQueue_ptr->getTime();
-
- int queue_pointer = m_head;
- for (int i=0; i<m_size; i++) {
- if (current_time - (getEntry(queue_pointer).m_time) >= g_DEADLOCK_THRESHOLD) {
- WARN_EXPR(getEntry(queue_pointer));
- WARN_EXPR(m_chip_ptr->getID());
- WARN_EXPR(current_time);
- ERROR_MSG("Possible Deadlock detected");
- }
- inc_index(queue_pointer);
- }
-
- if (m_size > 0) { // If there are still outstanding requests, keep checking
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
- } else {
- m_deadlock_check_scheduled = false;
- }
-}
-
-void StoreBuffer::printConfig(ostream& out)
-{
- out << "Store buffer entries: " << MAX_ENTRIES << " (Only valid if TSO is enabled)" << endl;
-}
-
-// Handle an incoming store request, this method is responsible for
-// calling hitCallback as needed
-void
-StoreBuffer::insertStore(Packet* pkt, const CacheMsg& request)
-{
- Address addr = request.getAddress();
- CacheRequestType type = request.getType();
- Address pc = request.getProgramCounter();
- AccessModeType access_mode = request.getAccessMode();
- int size = request.getSize();
- int threadID = request.getThreadID();
-
- DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "insertStore");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
- assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
- assert(isReady());
-
- // See if we should schedule a deadlock check
- if (m_deadlock_check_scheduled == false) {
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
- m_deadlock_check_scheduled = true;
- }
-
- // Perform the hit-callback for the store
- SubBlock subblock(addr, size);
- if(type == CacheRequestType_ST) {
- g_system_ptr->getDriver()->hitCallback(pkt);
- assert(subblock.getSize() != 0);
- } else {
- // wait to perform the hitCallback until later for Atomics
- }
-
- // Perform possible pre-fetch
- if(!isEmpty()) {
- Packet new_pkt(pkt);
- pkt->req->setFlags(Request::PREFETCH);
- m_chip_ptr->getSequencer(m_version)->makeRequest(&new_pkt);
- }
-
- // Update the StoreCache
- m_store_cache.add(subblock);
-
- // Enqueue the entry
- StoreBufferEntry entry(subblock, type, pc, access_mode, size, threadID); // FIXME
- enqueue(entry);
-
- if(type == CacheRequestType_ATOMIC) {
- m_seen_atomic = true;
- }
-
- processHeadOfQueue();
-}
-
-void StoreBuffer::callBack(const Address& addr, DataBlock& data, Packet* pkt)
-{
- DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "callBack");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
- assert(!isEmpty());
- assert(m_pending == true);
- assert(line_address(addr) == addr);
- assert(line_address(m_pending_address) == addr);
- assert(line_address(peek().m_subblock.getAddress()) == addr);
- CacheRequestType type = peek().m_type;
- //int threadID = peek().m_thread;
- assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
- m_pending = false;
-
- // If oldest entry was ATOMIC, perform the callback
- if(type == CacheRequestType_ST) {
- // We already performed the call back for the store at insert time
- } else {
- // We waited to perform the hitCallback until now for Atomics
- peek().m_subblock.mergeFrom(data); // copy the correct bytes from DataBlock into the SubBlock for the Load part of the atomic Load/Store
- g_system_ptr->getDriver()->hitCallback(pkt);
- m_seen_atomic = false;
-
- /// FIXME - record the time spent in the store buffer - split out ST vs ATOMIC
- }
- assert(peek().m_subblock.getSize() != 0);
-
- // Apply the head entry to the datablock
- peek().m_subblock.mergeTo(data); // For both the Store and Atomic cases
-
- // Update the StoreCache
- m_store_cache.remove(peek().m_subblock);
-
- // Dequeue the entry from the store buffer
- dequeue();
-
- if (isEmpty()) {
- assert(m_store_cache.isEmpty());
- }
-
- if(type == CacheRequestType_ATOMIC) {
- assert(isEmpty());
- }
-
- // See if we can remove any more entries
- processHeadOfQueue();
-}
-
-void StoreBuffer::processHeadOfQueue()
-{
- if(!isEmpty() && !m_pending) {
- StoreBufferEntry& entry = peek();
- assert(m_pending == false);
- m_pending = true;
- m_pending_address = entry.m_subblock.getAddress();
- CacheMsg request(entry.m_subblock.getAddress(), entry.m_subblock.getAddress(), entry.m_type, entry.m_pc, entry.m_access_mode, entry.m_size, PrefetchBit_No, 0, Address(0), entry.m_thread);
- m_chip_ptr->getSequencer(m_version)->doRequest(request);
- }
-}
-
-bool StoreBuffer::isReady() const
-{
- return ((m_size < MAX_ENTRIES) && (!m_seen_atomic));
-}
-
-// Queue implementation methods
-
-StoreBufferEntry& StoreBuffer::peek()
-{
- return getEntry(m_head);
-}
-
-void StoreBuffer::dequeue()
-{
- assert(m_size > 0);
- m_size--;
- inc_index(m_head);
-}
-
-void StoreBuffer::enqueue(const StoreBufferEntry& entry)
-{
- // assert(isReady());
- (*m_queue_ptr)[m_tail] = entry;
- m_size++;
- g_system_ptr->getProfiler()->storeBuffer(m_size, m_store_cache.size());
- inc_index(m_tail);
-}
-
-StoreBufferEntry& StoreBuffer::getEntry(int index)
-{
- return (*m_queue_ptr)[index];
-}
-
-void StoreBuffer::print(ostream& out) const
-{
- out << "[StoreBuffer]";
-}
-
diff --git a/src/mem/ruby/system/StoreBuffer.hh b/src/mem/ruby/system/StoreBuffer.hh
deleted file mode 100644
index 2c9283f4b..000000000
--- a/src/mem/ruby/system/StoreBuffer.hh
+++ /dev/null
@@ -1,121 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef StoreBuffer_H
-#define StoreBuffer_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Consumer.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/protocol/AccessModeType.hh"
-#include "mem/protocol/CacheRequestType.hh"
-#include "mem/ruby/system/StoreCache.hh"
-
-class CacheMsg;
-class DataBlock;
-class SubBlock;
-class StoreBufferEntry;
-class AbstractChip;
-class Packet;
-
-template <class TYPE> class Vector;
-
-class StoreBuffer : public Consumer {
-public:
- // Constructors
- StoreBuffer(AbstractChip* chip_ptr, int version);
-
- // Destructor
- ~StoreBuffer();
-
- // Public Methods
- void wakeup(); // Used only for deadlock detection
- void callBack(const Address& addr, DataBlock& data, Packet* pkt);
- void insertStore(Packet* pkt, const CacheMsg& request);
- void updateSubBlock(SubBlock& sub_block) const { m_store_cache.update(sub_block); }
- bool trySubBlock(const SubBlock& sub_block) const { assert(isReady()); return m_store_cache.check(sub_block); }
- void print(ostream& out) const;
- bool isEmpty() const { return (m_size == 0); }
- bool isReady() const;
-
- // Class methods
- static void printConfig(ostream& out);
-
-private:
- // Private Methods
- void processHeadOfQueue();
-
- StoreBufferEntry& peek();
- void dequeue();
- void enqueue(const StoreBufferEntry& entry);
- StoreBufferEntry& getEntry(int index);
-
- // Private copy constructor and assignment operator
- StoreBuffer(const StoreBuffer& obj);
- StoreBuffer& operator=(const StoreBuffer& obj);
-
- // Data Members (m_ prefix)
- int m_version;
-
- Vector<StoreBufferEntry>* m_queue_ptr;
- int m_head;
- int m_tail;
- int m_size;
-
- StoreCache m_store_cache;
-
- AbstractChip* m_chip_ptr;
- bool m_pending;
- Address m_pending_address;
- bool m_seen_atomic;
- bool m_deadlock_check_scheduled;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const StoreBuffer& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const StoreBuffer& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //StoreBuffer_H
diff --git a/src/mem/ruby/system/StoreCache.cc b/src/mem/ruby/system/StoreCache.cc
deleted file mode 100644
index a11b2ac50..000000000
--- a/src/mem/ruby/system/StoreCache.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/system/StoreCache.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/common/Driver.hh"
-#include "mem/gems_common/Vector.hh"
-#include "mem/ruby/common/DataBlock.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/gems_common/Map.hh"
-
-// Helper class
-struct StoreCacheEntry {
- StoreCacheEntry() {
- m_byte_counters.setSize(RubyConfig::dataBlockBytes());
- for(int i=0; i<m_byte_counters.size(); i++) {
- m_byte_counters[i] = 0;
- }
- m_line_counter = 0;
-
- }
- Address m_addr;
- DataBlock m_datablock;
- Vector<int> m_byte_counters;
- int m_line_counter;
-};
-
-StoreCache::StoreCache()
-{
- m_internal_cache_ptr = new Map<Address, StoreCacheEntry>;
-}
-
-StoreCache::~StoreCache()
-{
- delete m_internal_cache_ptr;
-}
-
-bool StoreCache::isEmpty() const
-{
- return m_internal_cache_ptr->size() == 0;
-}
-
-int StoreCache::size() const { return m_internal_cache_ptr->size(); }
-
-void StoreCache::add(const SubBlock& block)
-{
- if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
- m_internal_cache_ptr->allocate(line_address(block.getAddress()));
- }
-
- StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
-
- // For each byte in entry change the bytes and inc. the counters
- int starting_offset = block.getAddress().getOffset();
- int size = block.getSize();
- for (int index=0; index < size; index++) {
- // Update counter
- entry.m_byte_counters[starting_offset+index]++;
-
- // Record data
- entry.m_datablock.setByte(starting_offset+index, block.getByte(index));
-
- DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
- DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(block.getByte(index)));
- DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
- }
-
- // Increment the counter
- entry.m_line_counter++;
-}
-
-void StoreCache::remove(const SubBlock& block)
-{
- assert(m_internal_cache_ptr->exist(line_address(block.getAddress())));
-
- StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
-
- // Decrement the byte counters
- int starting_offset = block.getAddress().getOffset();
- int size = block.getSize();
- for (int index=0; index < size; index++) {
- // Update counter
- entry.m_byte_counters[starting_offset+index]--;
- }
-
- // Decrement the line counter
- entry.m_line_counter--;
- assert(entry.m_line_counter >= 0);
-
- // Check to see if we should de-allocate this entry
- if (entry.m_line_counter == 0) {
- m_internal_cache_ptr->deallocate(line_address(block.getAddress()));
- }
-}
-
-bool StoreCache::check(const SubBlock& block) const
-{
- if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
- return false;
- } else {
- // Lookup the entry
- StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
-
- // See if all the bytes are valid
- int starting_offset = block.getAddress().getOffset();
- int size = block.getSize();
- for (int index=0; index < size; index++) {
- if (entry.m_byte_counters[starting_offset+index] > 0) {
- // So far so good
- } else {
- // not all the bytes were valid
- return false;
- }
- }
- }
- return true;
-}
-
-void StoreCache::update(SubBlock& block) const
-{
- if (m_internal_cache_ptr->exist(line_address(block.getAddress()))) {
- // Lookup the entry
- StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
-
- // Copy all appropriate and valid bytes from the store cache to
- // the SubBlock
- int starting_offset = block.getAddress().getOffset();
- int size = block.getSize();
- for (int index=0; index < size; index++) {
-
- DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
- DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(entry.m_datablock.getByte(starting_offset+index)));
- DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
-
- // If this byte is valid, copy the data into the sub-block
- if (entry.m_byte_counters[starting_offset+index] > 0) {
- block.setByte(index, entry.m_datablock.getByte(starting_offset+index));
- }
- }
- }
-}
-
-void StoreCache::print(ostream& out) const
-{
- out << "[StoreCache]";
-}
-
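StoreCache, also deleted here, tracks a reference count for every byte of a line touched by an outstanding store plus a per-line count used to decide when the entry can be freed. A reduced sketch of that bookkeeping for a single line follows; the 64-byte line size and the struct name are assumptions made only for illustration.

    #include <array>
    #include <cassert>
    #include <cstdint>

    // Per-line store-cache entry, as in the removed StoreCache: one counter
    // and one data byte per byte offset, plus a whole-line counter.
    struct LineEntry {
        std::array<int, 64>     byte_counters{};  // assumed 64-byte lines
        std::array<uint8_t, 64> data{};
        int                     line_counter = 0;

        // Record a store of 'size' bytes starting at 'offset'.
        void add(int offset, const uint8_t* bytes, int size) {
            for (int i = 0; i < size; i++) {
                byte_counters[offset + i]++;
                data[offset + i] = bytes[i];
            }
            line_counter++;
        }

        // Retire one store; returns true when the entry can be deallocated.
        bool remove(int offset, int size) {
            for (int i = 0; i < size; i++)
                byte_counters[offset + i]--;
            line_counter--;
            assert(line_counter >= 0);
            return line_counter == 0;
        }

        // A sub-block "checks" only if every requested byte is still covered
        // by at least one outstanding store.
        bool check(int offset, int size) const {
            for (int i = 0; i < size; i++)
                if (byte_counters[offset + i] <= 0)
                    return false;
            return true;
        }
    };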
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index 0e1a29130..9d1119f01 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -38,134 +38,306 @@
#include "mem/ruby/system/System.hh"
+#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/network/Network.hh"
-#include "mem/ruby/tester/Tester.hh"
-#include "mem/ruby/tester/SyntheticDriver.hh"
-#include "mem/ruby/tester/DeterministicDriver.hh"
-#include "mem/protocol/Chip.hh"
-//#include "mem/ruby/recorder/Tracer.hh"
+#include "mem/ruby/recorder/Tracer.hh"
#include "mem/protocol/Protocol.hh"
-
-RubySystem::RubySystem()
+#include "mem/ruby/buffers/MessageBuffer.hh"
+#include "mem/ruby/system/Sequencer.hh"
+#include "mem/ruby/system/DMASequencer.hh"
+#include "mem/ruby/system/MemoryVector.hh"
+#include "mem/protocol/ControllerFactory.hh"
+#include "mem/ruby/slicc_interface/AbstractController.hh"
+#include "mem/ruby/system/CacheMemory.hh"
+#include "mem/ruby/system/DirectoryMemory.hh"
+#include "mem/ruby/network/simple/Topology.hh"
+#include "mem/ruby/network/simple/SimpleNetwork.hh"
+#include "mem/ruby/system/RubyPort.hh"
+#include "mem/ruby/network/garnet-flexible-pipeline/GarnetNetwork.hh"
+#include "mem/ruby/network/garnet-fixed-pipeline/GarnetNetwork_d.hh"
+#include "mem/ruby/system/MemoryControl.hh"
+
+int RubySystem::m_random_seed;
+bool RubySystem::m_randomization;
+int RubySystem::m_tech_nm;
+int RubySystem::m_freq_mhz;
+int RubySystem::m_block_size_bytes;
+int RubySystem::m_block_size_bits;
+uint64 RubySystem::m_memory_size_bytes;
+int RubySystem::m_memory_size_bits;
+
+map< string, RubyPort* > RubySystem::m_ports;
+map< string, CacheMemory* > RubySystem::m_caches;
+map< string, DirectoryMemory* > RubySystem::m_directories;
+map< string, Sequencer* > RubySystem::m_sequencers;
+map< string, DMASequencer* > RubySystem::m_dma_sequencers;
+map< string, AbstractController* > RubySystem::m_controllers;
+map< string, MemoryControl* > RubySystem::m_memorycontrols;
+
+
+Network* RubySystem::m_network_ptr;
+map< string, Topology*> RubySystem::m_topologies;
+Profiler* RubySystem::m_profiler_ptr;
+Tracer* RubySystem::m_tracer_ptr;
+
+MemoryVector* RubySystem::m_mem_vec_ptr;
+
+
+RubySystem* RubySystem::create(const vector <RubyObjConf> & sys_conf)
{
- init();
- m_preinitialized_driver = false;
- createDriver();
-
- /* gem5:Binkert for decomissiong of tracer
- m_tracer_ptr = new Tracer;
- */
-
- /* gem5:Arka for decomissiong of log_tm
- if (XACT_MEMORY) {
- m_xact_isolation_checker = new XactIsolationChecker;
- m_xact_commit_arbiter = new XactCommitArbiter;
- m_xact_visualizer = new XactVisualizer;
- }
-*/
+ if (g_system_ptr == NULL)
+ return new RubySystem(sys_conf);
+ return g_system_ptr;
}
-RubySystem::RubySystem(Driver* _driver)
+void RubySystem::init(const vector<string> & argv)
{
- init();
- m_preinitialized_driver = true;
- m_driver_ptr = _driver;
-}
+ for (size_t i=0; i < argv.size(); i+=2) {
+ if (argv[i] == "random_seed") {
+ m_random_seed = atoi(argv[i+1].c_str());
+ srandom(m_random_seed);
+ } else if (argv[i] == "randomization") {
+ m_randomization = string_to_bool(argv[i+1]);
+ } else if (argv[i] == "tech_nm") {
+ m_tech_nm = atoi(argv[i+1].c_str());
+ } else if (argv[i] == "freq_mhz") {
+ m_freq_mhz = atoi(argv[i+1].c_str());
+ } else if (argv[i] == "block_size_bytes") {
+ m_block_size_bytes = atoi(argv[i+1].c_str());
+ assert(is_power_of_2(m_block_size_bytes));
+ m_block_size_bits = log_int(m_block_size_bytes);
+ } else if (argv[i] == "debug") {
+
+ } else if (argv[i] == "tracer") {
+
+ } else if (argv[i] == "profiler") {
+
+ // } else if (argv[i] == "MI_example") {
-RubySystem::~RubySystem()
-{
- for (int i = 0; i < m_chip_vector.size(); i++) {
- delete m_chip_vector[i];
+ } else {
+ cerr << "Error: Unknown RubySystem config parameter -- " << argv[i] << endl;
+ assert(0);
+ }
}
- if (!m_preinitialized_driver)
- delete m_driver_ptr;
- delete m_network_ptr;
- delete m_profiler_ptr;
- /* gem5:Binkert for decomissiong of tracer
- delete m_tracer_ptr;
- */
}
-void RubySystem::init()
+RubySystem::RubySystem(const vector <RubyObjConf> & sys_conf)
{
- DEBUG_MSG(SYSTEM_COMP, MedPrio,"initializing");
-
- m_driver_ptr = NULL;
- m_profiler_ptr = new Profiler;
+ // DEBUG_MSG(SYSTEM_COMP, MedPrio,"initializing");
+
+ for (size_t i=0;i<sys_conf.size(); i++) {
+ const string & type = sys_conf[i].type;
+ const string & name = sys_conf[i].name;
+ const vector<string> & argv = sys_conf[i].argv;
+ if (type == "RubySystem") {
+ init(argv); // initialize system-wide variables before doing anything else!
+ } else if (type == "Debug") {
+ g_debug_ptr = new Debug(name, argv);
+ }
+ }
- // NETWORK INITIALIZATION
- // create the network by calling a function that calls new
- m_network_ptr = Network::createNetwork(RubyConfig::numberOfChips());
+ assert( g_debug_ptr != NULL);
+ g_eventQueue_ptr = new RubyEventQueue;
+ g_system_ptr = this;
+ m_mem_vec_ptr = new MemoryVector;
+
+ /* object construction is broken into two steps (Constructor and init) to avoid cyclic dependencies
+ * e.g. a sequencer needs a pointer to a controller and a controller needs a pointer to a sequencer
+ */
+
+ vector<string> memory_control_names;
+
+ for (size_t i=0;i<sys_conf.size(); i++) {
+ const string & type = sys_conf[i].type;
+ const string & name = sys_conf[i].name;
+ const vector<string> & argv = sys_conf[i].argv;
+ if (type == "RubySystem" || type == "Debug")
+ continue;
+ else if (type == "SetAssociativeCache")
+ m_caches[name] = new CacheMemory(name);
+ else if (type == "DirectoryMemory")
+ m_directories[name] = new DirectoryMemory(name);
+ else if (type == "Sequencer") {
+ m_sequencers[name] = new Sequencer(name);
+ m_ports[name] = m_sequencers[name];
+ } else if (type == "DMASequencer") {
+ m_dma_sequencers[name] = new DMASequencer(name);
+ m_ports[name] = m_dma_sequencers[name];
+ } else if (type == "Topology") {
+ assert(m_topologies.size() == 0); // only one topology at a time is supported right now
+ m_topologies[name] = new Topology(name);
+ } else if (type == "SimpleNetwork") {
+ assert(m_network_ptr == NULL); // only one network at a time is supported right now
+ m_network_ptr = new SimpleNetwork(name);
+ } else if (type.find("generated") == 0) {
+ string controller_type = type.substr(10);
+ m_controllers[name] = ControllerFactory::createController(controller_type, name);
+// printf ("ss: generated %s \n", controller_type);
+//added by SS
+ } else if (type == "Tracer") {
+ //m_tracers[name] = new Tracer(name);
+ m_tracer_ptr = new Tracer(name);
+ } else if (type == "Profiler") {
+ m_profiler_ptr = new Profiler(name);
+ } else if (type == "GarnetNetwork") {
+ assert(m_network_ptr == NULL); // only one network at a time is supported right now
+ m_network_ptr = new GarnetNetwork(name);
+ } else if (type == "GarnetNetwork_d") {
+ assert(m_network_ptr == NULL); // only one network at a time is supported right now
+ m_network_ptr = new GarnetNetwork_d(name);
+ } else if (type == "MemoryControl") {
+ m_memorycontrols[name] = new MemoryControl(name);
+ memory_control_names.push_back (name);
+ } else {
+ cerr << "Error: Unknown object type -- " << type << endl;
+ assert(0);
+ }
+ }
- DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed network");
+ for (size_t i=0;i<sys_conf.size(); i++) {
+ string type = sys_conf[i].type;
+ string name = sys_conf[i].name;
+ const vector<string> & argv = sys_conf[i].argv;
+ if (type == "Topology")
+ m_topologies[name]->init(argv);
+ }
- // CHIP INITIALIZATION
- m_chip_vector.setSize(RubyConfig::numberOfChips());// create the vector of pointers to processors
- for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
- // create the chip
- m_chip_vector[i] = new Chip(i, m_network_ptr);
- DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed a chip");
+ for (size_t i=0;i<sys_conf.size(); i++) {
+ string type = sys_conf[i].type;
+ string name = sys_conf[i].name;
+ const vector<string> & argv = sys_conf[i].argv;
+ if (type == "SimpleNetwork" || type == "GarnetNetwork" || type == "GarnetNetwork_d"){
+ m_network_ptr->init(argv);
+ }
}
- // These must be after the chips are constructed
+ for (size_t i=0;i<sys_conf.size(); i++) {
+ string type = sys_conf[i].type;
+ string name = sys_conf[i].name;
+ const vector<string> & argv = sys_conf[i].argv;
+ if (type == "MemoryControl" ){
+ m_memorycontrols[name]->init(argv);
+ }
+ }
-#if 0
- if (!g_SIMICS) {
- if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
- m_driver_ptr = new SyntheticDriver(this);
- } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
- m_driver_ptr = new DeterministicDriver(this);
- } else if (g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
- ERROR_MSG("SYNTHETIC and DETERMINISTIC DRIVERS are exclusive and cannot be both enabled");
- } else {
- // normally make tester object, otherwise make an opal interface object.
- if (!OpalInterface::isOpalLoaded()) {
- m_driver_ptr = new Tester(this);
- } else {
- m_driver_ptr = new OpalInterface(this);
- }
+ for (size_t i=0;i<sys_conf.size(); i++) {
+ string type = sys_conf[i].type;
+ string name = sys_conf[i].name;
+ const vector<string> & argv = sys_conf[i].argv;
+ if (type == "RubySystem" || type == "Debug")
+ continue;
+ else if (type == "SetAssociativeCache")
+ m_caches[name]->init(argv);
+ else if (type == "DirectoryMemory")
+ m_directories[name]->init(argv);
+ else if (type == "MemoryControl")
+ continue;
+ else if (type == "Sequencer")
+ m_sequencers[name]->init(argv);
+ else if (type == "DMASequencer")
+ m_dma_sequencers[name]->init(argv);
+ else if (type == "Topology")
+ continue;
+ else if (type == "SimpleNetwork" || type == "GarnetNetwork" || type == "GarnetNetwork_d")
+ continue;
+ else if (type.find("generated") == 0) {
+ string controller_type = type.substr(11);
+ m_controllers[name]->init(m_network_ptr, argv);
}
- } else {
- // detect if opal is loaded or not
- if (OpalInterface::isOpalLoaded()) {
- m_driver_ptr = new OpalInterface(this);
- } else {
+//added by SS
+ else if (type == "Tracer")
+ //m_tracers[name]->init(argv);
+ m_tracer_ptr->init(argv);
+ else if (type == "Profiler")
+ m_profiler_ptr->init(argv, memory_control_names);
+// else if (type == "MI_example"){
+// }
+ else
assert(0);
- /* Need to allocate a driver here */
- // m_driver_ptr = new SimicsDriver(this);
- }
}
-#endif
+
+// m_profiler_ptr = new Profiler;
+
+ // calculate system-wide parameters
+ m_memory_size_bytes = 0;
+ DirectoryMemory* prev = NULL;
+ for (map< string, DirectoryMemory*>::const_iterator it = m_directories.begin();
+ it != m_directories.end(); it++) {
+ if (prev != NULL)
+ assert((*it).second->getSize() == prev->getSize()); // must be equal for proper address mapping
+ m_memory_size_bytes += (*it).second->getSize();
+ prev = (*it).second;
+ }
+ m_mem_vec_ptr->setSize(m_memory_size_bytes);
+ m_memory_size_bits = log_int(m_memory_size_bytes);
+
+// m_tracer_ptr = new Tracer;
DEBUG_MSG(SYSTEM_COMP, MedPrio,"finished initializing");
DEBUG_NEWLINE(SYSTEM_COMP, MedPrio);
}
-void RubySystem::createDriver()
+RubySystem::~RubySystem()
{
- if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
- cerr << "Creating Synthetic Driver" << endl;
- m_driver_ptr = new SyntheticDriver(this);
- } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
- cerr << "Creating Deterministic Driver" << endl;
- m_driver_ptr = new DeterministicDriver(this);
+ /*
+ for (int i=0; i < MachineType_base_level(MachineType_NUM); i++) {
+ for (int j=0; j < RubyConfig::getNumberOfControllersPerType(i); j++ ) {
+ delete m_controllers[i][j];
}
+ }
+ delete m_network_ptr;
+ delete m_profiler_ptr;
+ delete m_tracer_ptr;
+ */
+}
+
+void RubySystem::printSystemConfig(ostream & out)
+{
+ out << "RubySystem config:" << endl;
+ out << " random_seed: " << m_random_seed << endl;
+ out << " randomization: " << m_randomization << endl;
+ out << " tech_nm: " << m_tech_nm << endl;
+ out << " freq_mhz: " << m_freq_mhz << endl;
+ out << " block_size_bytes: " << m_block_size_bytes << endl;
+ out << " block_size_bits: " << m_block_size_bits << endl;
+ out << " memory_size_bytes: " << m_memory_size_bytes << endl;
+ out << " memory_size_bits: " << m_memory_size_bits << endl;
+
}
-void RubySystem::printConfig(ostream& out) const
+void RubySystem::printConfig(ostream& out)
{
out << "\n================ Begin RubySystem Configuration Print ================\n\n";
- RubyConfig::printConfiguration(out);
- out << endl;
- getChip(0)->printConfig(out);
+ // RubyConfig::printConfiguration(out);
+ // out << endl;
+ printSystemConfig(out);
+ for (map<string, AbstractController*>::const_iterator it = m_controllers.begin();
+ it != m_controllers.end(); it++) {
+ (*it).second->printConfig(out);
+ }
+ for (map<string, CacheMemory*>::const_iterator it = m_caches.begin();
+ it != m_caches.end(); it++) {
+ (*it).second->printConfig(out);
+ }
+ DirectoryMemory::printGlobalConfig(out);
+ for (map<string, DirectoryMemory*>::const_iterator it = m_directories.begin();
+ it != m_directories.end(); it++) {
+ (*it).second->printConfig(out);
+ }
+ for (map<string, Sequencer*>::const_iterator it = m_sequencers.begin();
+ it != m_sequencers.end(); it++) {
+ (*it).second->printConfig(out);
+ }
+
m_network_ptr->printConfig(out);
- m_driver_ptr->printConfig(out);
m_profiler_ptr->printConfig(out);
+
out << "\n================ End RubySystem Configuration Print ================\n\n";
}
void RubySystem::printStats(ostream& out)
{
+
const time_t T = time(NULL);
tm *localTime = localtime(&T);
char buf[100];
@@ -174,32 +346,30 @@ void RubySystem::printStats(ostream& out)
out << "Real time: " << buf << endl;
m_profiler_ptr->printStats(out);
- for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
- for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
- m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->printStats(out);
- }
- }
m_network_ptr->printStats(out);
- m_driver_ptr->printStats(out);
- Chip::printStats(out);
+ for (map<string, AbstractController*>::const_iterator it = m_controllers.begin();
+ it != m_controllers.end(); it++) {
+ (*it).second->printStats(out);
+ }
}
void RubySystem::clearStats() const
{
+ /*
m_profiler_ptr->clearStats();
+ for (int i=0; i<m_rubyRequestQueues.size(); i++)
+ for (int j=0;j<m_rubyRequestQueues[i].size(); j++)
+ m_rubyRequestQueues[i][j]->clearStats();
m_network_ptr->clearStats();
- m_driver_ptr->clearStats();
- Chip::clearStats();
- for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
- for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
- m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->clearStats();
- }
- }
+ for (int i=0; i < MachineType_base_level(MachineType_NUM); i++)
+ m_controllers[i][0]->clearStats();
+ */
}
void RubySystem::recordCacheContents(CacheRecorder& tr) const
{
- for (int i = 0; i < m_chip_vector.size(); i++) {
+ /*
+ for (int i = 0; i < m_chip_vector.size(); i++) {
for (int m_version = 0; m_version < RubyConfig::numberOfProcsPerChip(); m_version++) {
if (Protocol::m_TwoLevelCache) {
m_chip_vector[i]->m_L1Cache_L1IcacheMemory_vec[m_version]->setAsInstructionCache(true);
@@ -210,6 +380,7 @@ void RubySystem::recordCacheContents(CacheRecorder& tr) const
}
m_chip_vector[i]->recordCacheContents(tr);
}
+ */
}
#ifdef CHECK_COHERENCE
@@ -222,7 +393,7 @@ void RubySystem::recordCacheContents(CacheRecorder& tr) const
// and "isBlockExclusive" that are specific to that protocol
//
void RubySystem::checkGlobalCoherenceInvariant(const Address& addr ) {
-
+ /*
NodeID exclusive = -1;
bool sharedDetected = false;
NodeID lastShared = -1;
@@ -262,6 +433,7 @@ void RubySystem::checkGlobalCoherenceInvariant(const Address& addr ) {
}
}
}
+ */
}
#endif
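The rewritten System.cc builds everything from a flat vector of RubyObjConf records: "RubySystem" and "Debug" entries are handled first, objects are allocated in a second pass, and a final pass runs each object's init(). The hedged sketch below shows what assembling such a configuration might look like; the object names, argument values, and object mix are invented for illustration and are not a shipped configuration.

    #include <string>
    #include <vector>
    #include "mem/ruby/system/System.hh"

    // Hypothetical driver code: assemble a minimal RubyObjConf list and hand
    // it to RubySystem::create(), which only builds a system on the first call.
    void buildToyRubySystem()
    {
        std::vector<RubyObjConf> conf;

        // System-wide parameters are consumed by RubySystem::init() as
        // "key", "value" pairs.
        std::vector<std::string> sys_args;
        sys_args.push_back("random_seed");      sys_args.push_back("1234");
        sys_args.push_back("block_size_bytes"); sys_args.push_back("64");
        conf.push_back(RubyObjConf("RubySystem", "ruby_sys", sys_args));

        // A Debug object must exist before the construction passes run.
        std::vector<std::string> no_args;
        conf.push_back(RubyObjConf("Debug", "debug0", no_args));

        // Component argv lists are left empty only to keep the sketch short;
        // a real configuration passes each object's parameters here.
        conf.push_back(RubyObjConf("SetAssociativeCache", "l1_cache", no_args));
        conf.push_back(RubyObjConf("DirectoryMemory",     "dir0",     no_args));
        conf.push_back(RubyObjConf("Sequencer",           "seq0",     no_args));

        RubySystem::create(conf);
    }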
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
index bc493dd16..40c425ad7 100644
--- a/src/mem/ruby/system/System.hh
+++ b/src/mem/ruby/system/System.hh
@@ -41,81 +41,129 @@
#ifndef SYSTEM_H
#define SYSTEM_H
+#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/protocol/MachineType.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include <map>
class Profiler;
class Network;
-class Driver;
class CacheRecorder;
class Tracer;
class Sequencer;
-class XactIsolationChecker;
-class XactCommitArbiter;
-class XactVisualizer;
-class TransactionInterfaceManager;
+class DMASequencer;
+class MemoryVector;
+class AbstractController;
+class MessageBuffer;
+class CacheMemory;
+class DirectoryMemory;
+class Topology;
+class MemoryControl;
+
+struct RubyObjConf {
+ string type;
+ string name;
+ vector<string> argv;
+ RubyObjConf(string _type, string _name, vector<string> _argv)
+ : type(_type), name(_name), argv(_argv)
+ {}
+};
class RubySystem {
public:
- // Constructors
- RubySystem();
- RubySystem(Driver* _driver); // used when driver is already instantiated (e.g. M5's RubyMem)
-
+ static RubySystem* create(const vector <RubyObjConf> & sys_conf);
// Destructor
~RubySystem();
+ // config accessors
+ static int getRandomSeed() { return m_random_seed; }
+ static int getRandomization() { return m_randomization; }
+ static int getTechNm() { return m_tech_nm; }
+ static int getFreqMhz() { return m_freq_mhz; }
+ static int getBlockSizeBytes() { return m_block_size_bytes; }
+ static int getBlockSizeBits() { return m_block_size_bits; }
+ static uint64 getMemorySizeBytes() { return m_memory_size_bytes; }
+ static int getMemorySizeBits() { return m_memory_size_bits; }
+
// Public Methods
- int getNumProcessors() { return RubyConfig::numberOfProcessors(); }
- int getNumMemories() { return RubyConfig::numberOfMemories(); }
- Profiler* getProfiler() { return m_profiler_ptr; }
- Driver* getDriver() { assert(m_driver_ptr != NULL); return m_driver_ptr; }
+ static RubyPort* getPortOnly(const string & name) {
+ assert(m_ports.count(name) == 1); return m_ports[name]; }
+ static RubyPort* getPort(const string & name, void (*hit_callback)(int64_t)) {
+ assert(m_ports.count(name) == 1); m_ports[name]->registerHitCallback(hit_callback); return m_ports[name]; }
+ static Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
+ static Topology* getTopology(const string & name) { assert(m_topologies.count(name) == 1); return m_topologies[name]; }
+ static CacheMemory* getCache(const string & name) { assert(m_caches.count(name) == 1); return m_caches[name]; }
+ static DirectoryMemory* getDirectory(const string & name) { assert(m_directories.count(name) == 1); return m_directories[name]; }
+ static MemoryControl* getMemoryControl(const string & name) { assert(m_memorycontrols.count(name) == 1); return m_memorycontrols[name]; }
+ static Sequencer* getSequencer(const string & name) { assert(m_sequencers.count(name) == 1); return m_sequencers[name]; }
+ static DMASequencer* getDMASequencer(const string & name) { assert(m_dma_sequencers.count(name) == 1); return m_dma_sequencers[name]; }
+ static AbstractController* getController(const string & name) { assert(m_controllers.count(name) == 1); return m_controllers[name]; }
+
+ static RubyEventQueue* getEventQueue() { return g_eventQueue_ptr; }
+
+ static int getNumberOfDirectories() { return m_directories.size(); }
+ static int getNumberOfSequencers() { return m_sequencers.size(); }
+
+ Profiler* getProfiler() {assert(m_profiler_ptr != NULL); return m_profiler_ptr; }
Tracer* getTracer() { assert(m_tracer_ptr != NULL); return m_tracer_ptr; }
- Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
- XactIsolationChecker* getXactIsolationChecker() { assert(m_xact_isolation_checker!= NULL); return m_xact_isolation_checker;}
- XactCommitArbiter* getXactCommitArbiter() { assert(m_xact_commit_arbiter!= NULL); return m_xact_commit_arbiter;}
- XactVisualizer* getXactVisualizer() { assert(m_xact_visualizer!= NULL); return m_xact_visualizer;}
-
- AbstractChip* getChip(int chipNumber) const { assert(m_chip_vector[chipNumber] != NULL); return m_chip_vector[chipNumber];}
- Sequencer* getSequencer(int procNumber) const {
- assert(procNumber < RubyConfig::numberOfProcessors());
- return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getSequencer(procNumber%RubyConfig::numberOfProcsPerChip());
- }
- TransactionInterfaceManager* getTransactionInterfaceManager(int procNumber) const {
- return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getTransactionInterfaceManager(procNumber%RubyConfig::numberOfProcsPerChip());
- }
+ static MemoryVector* getMemoryVector() { assert(m_mem_vec_ptr != NULL); return m_mem_vec_ptr;}
+
void recordCacheContents(CacheRecorder& tr) const;
- void printConfig(ostream& out) const;
- void printStats(ostream& out);
+ static void printConfig(ostream& out);
+ static void printStats(ostream& out);
void clearStats() const;
+ uint64 getInstructionCount(int thread) { return 1; }
+ static uint64 getCycleCount(int thread) { return g_eventQueue_ptr->getTime(); }
+
void print(ostream& out) const;
+ /*
#ifdef CHECK_COHERENCE
void checkGlobalCoherenceInvariant(const Address& addr);
#endif
+ */
private:
- // Private Methods
- void init();
- void createDriver();
+ // Constructors
+ RubySystem(const vector <RubyObjConf> & cfg_file);
// Private copy constructor and assignment operator
RubySystem(const RubySystem& obj);
RubySystem& operator=(const RubySystem& obj);
+ void init(const vector<string> & argv);
+
+ static void printSystemConfig(ostream& out);
+
+private:
+ // configuration parameters
+ static int m_random_seed;
+ static bool m_randomization;
+ static int m_tech_nm;
+ static int m_freq_mhz;
+ static int m_block_size_bytes;
+ static int m_block_size_bits;
+ static uint64 m_memory_size_bytes;
+ static int m_memory_size_bits;
+
// Data Members (m_ prefix)
- Network* m_network_ptr;
- Vector<AbstractChip*> m_chip_vector;
- Profiler* m_profiler_ptr;
- bool m_preinitialized_driver;
- Driver* m_driver_ptr;
- Tracer* m_tracer_ptr;
- XactIsolationChecker *m_xact_isolation_checker;
- XactCommitArbiter *m_xact_commit_arbiter;
- XactVisualizer *m_xact_visualizer;
+ static Network* m_network_ptr;
+ static map< string, Topology* > m_topologies;
+ static map< string, RubyPort* > m_ports;
+ static map< string, CacheMemory* > m_caches;
+ static map< string, DirectoryMemory* > m_directories;
+ static map< string, Sequencer* > m_sequencers;
+ static map< string, DMASequencer* > m_dma_sequencers;
+ static map< string, AbstractController* > m_controllers;
+ static map< string, MemoryControl* > m_memorycontrols;
+
+ //added by SS
+ //static map< string, Tracer* > m_tracers;
+
+ static Profiler* m_profiler_ptr;
+ static Tracer* m_tracer_ptr;
+ static MemoryVector* m_mem_vec_ptr;
};
// Output operator declaration
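With the new System.hh interface every major object is reachable through a static, name-keyed accessor, and ports report completions through a registered callback. A small example of wiring that up is sketched below; the callback body and the port name "seq0" are assumptions, only the getPort/getSequencer signatures come from the header above.

    #include <cstdint>
    #include <iostream>
    #include "mem/ruby/system/System.hh"

    // Hypothetical completion handler; the int64_t argument is the request
    // identifier delivered by the RubyPort when a memory access finishes.
    static void onHit(int64_t request_id)
    {
        std::cout << "request " << request_id << " completed" << std::endl;
    }

    void attachToSequencer()
    {
        // getPort() both looks the port up by name and registers the callback.
        RubyPort* port = RubySystem::getPort("seq0", onHit);

        // Other components are fetched the same way, by configuration name.
        Sequencer* seq = RubySystem::getSequencer("seq0");
        (void)port;
        (void)seq;
    }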
diff --git a/src/mem/ruby/system/TBETable.hh b/src/mem/ruby/system/TBETable.hh
index 2a0c78f06..7d2daa55a 100644
--- a/src/mem/ruby/system/TBETable.hh
+++ b/src/mem/ruby/system/TBETable.hh
@@ -43,7 +43,6 @@
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
#include "mem/ruby/system/System.hh"
template<class ENTRY>
@@ -51,19 +50,20 @@ class TBETable {
public:
// Constructors
- TBETable(AbstractChip* chip_ptr);
+ TBETable(int number_of_TBEs);
+
// Destructor
//~TBETable();
// Public Methods
- static void printConfig(ostream& out) { out << "TBEs_per_TBETable: " << NUMBER_OF_TBES << endl; }
+ void printConfig(ostream& out) { out << "TBEs_per_TBETable: " << m_number_of_TBEs << endl; }
bool isPresent(const Address& address) const;
void allocate(const Address& address);
void deallocate(const Address& address);
- bool areNSlotsAvailable(int n) const { return (NUMBER_OF_TBES - m_map.size()) >= n; }
+ bool areNSlotsAvailable(int n) const { return (m_number_of_TBEs - m_map.size()) >= n; }
ENTRY& lookup(const Address& address);
const ENTRY& lookup(const Address& address) const;
@@ -79,7 +79,9 @@ private:
// Data Members (m_prefix)
Map<Address, ENTRY> m_map;
- AbstractChip* m_chip_ptr;
+
+private:
+ int m_number_of_TBEs;
};
// Output operator declaration
@@ -100,11 +102,12 @@ ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj)
// ****************************************************************
+
template<class ENTRY>
extern inline
-TBETable<ENTRY>::TBETable(AbstractChip* chip_ptr)
+TBETable<ENTRY>::TBETable(int number_of_TBEs)
{
- m_chip_ptr = chip_ptr;
+ m_number_of_TBEs = number_of_TBEs;
}
// PUBLIC METHODS
@@ -115,7 +118,7 @@ extern inline
bool TBETable<ENTRY>::isPresent(const Address& address) const
{
assert(address == line_address(address));
- assert(m_map.size() <= NUMBER_OF_TBES);
+ assert(m_map.size() <= m_number_of_TBEs);
return m_map.exist(address);
}
@@ -124,7 +127,7 @@ extern inline
void TBETable<ENTRY>::allocate(const Address& address)
{
assert(isPresent(address) == false);
- assert(m_map.size() < NUMBER_OF_TBES);
+ assert(m_map.size() < m_number_of_TBEs);
g_system_ptr->getProfiler()->L2tbeUsageSample(m_map.size());
m_map.add(address, ENTRY());
}
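TBETable now sizes itself from a constructor argument instead of the global NUMBER_OF_TBES. A brief sketch of how a controller might drive it is given below; the MyTBE entry type, the capacity of 32, and the ack count are made up for the example, and allocate() still assumes a live RubySystem profiler as in the real controllers.

    #include "mem/ruby/common/Address.hh"
    #include "mem/ruby/system/TBETable.hh"

    // Hypothetical per-transaction state kept for an outstanding request.
    struct MyTBE {
        int pending_acks;
        MyTBE() : pending_acks(0) {}
    };

    void trackMiss(const Address& line_addr)   // must be a line address
    {
        static TBETable<MyTBE> tbes(32);       // capacity now set per instance

        // Reserve an entry only if the table still has room.
        if (!tbes.isPresent(line_addr) && tbes.areNSlotsAvailable(1)) {
            tbes.allocate(line_addr);
            tbes.lookup(line_addr).pending_acks = 3;
        }

        // ... and when the transaction completes the entry is released:
        // tbes.deallocate(line_addr);
    }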
diff --git a/src/mem/ruby/tester/BarrierGenerator.cc b/src/mem/ruby/tester/BarrierGenerator.cc
deleted file mode 100644
index 9dbcf39fd..000000000
--- a/src/mem/ruby/tester/BarrierGenerator.cc
+++ /dev/null
@@ -1,333 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: BarrierGenerator.C 1.3 2005/01/19 13:12:35-06:00 mikem@maya.cs.wisc.edu $
- *
- */
-
-#include "mem/ruby/tester/BarrierGenerator.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/ruby/tester/SyntheticDriver.hh"
-#include "mem/protocol/Chip.hh"
-
-BarrierGenerator::BarrierGenerator(NodeID node, SyntheticDriver& driver) :
- m_driver(driver)
-{
- m_status = BarrierGeneratorStatus_Thinking;
- m_last_transition = 0;
- m_node = node;
- m_counter = 0;
- proc_counter = 0;
- m_local_sense = false;
-
- m_total_think = 0;
- m_think_periods = 0;
-
- g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
-}
-
-BarrierGenerator::~BarrierGenerator()
-{
-}
-
-void BarrierGenerator::wakeup()
-{
- DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
- DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
-
- if (m_status == BarrierGeneratorStatus_Thinking) {
- m_barrier_done = false;
- m_local_sense = !m_local_sense;
- m_status = BarrierGeneratorStatus_Test_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateTest(); // Test
- } else if (m_status == BarrierGeneratorStatus_Test_Waiting) {
- m_status = BarrierGeneratorStatus_Test_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateTest(); // Test
- } else if (m_status == BarrierGeneratorStatus_Release_Waiting) {
- m_status = BarrierGeneratorStatus_Release_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateRelease(); // Test
- } else if (m_status == BarrierGeneratorStatus_StoreBarrierCounter_Waiting) {
- m_status = BarrierGeneratorStatus_StoreBarrierCounter_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateStoreCtr();
- } else if (m_status == BarrierGeneratorStatus_StoreFlag_Waiting) {
- m_status = BarrierGeneratorStatus_StoreFlag_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateStoreFlag();
- } else if (m_status == BarrierGeneratorStatus_Holding) {
- m_status = BarrierGeneratorStatus_Release_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateRelease(); // Release
- } else if (m_status == BarrierGeneratorStatus_Before_Swap) {
- m_status = BarrierGeneratorStatus_Swap_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateSwap();
- } else if (m_status == BarrierGeneratorStatus_SpinFlag_Ready) {
- m_status = BarrierGeneratorStatus_SpinFlag_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateLoadFlag();
- } else {
- WARN_EXPR(m_status);
- ERROR_MSG("Invalid status");
- }
-}
-
-void BarrierGenerator::performCallback(NodeID proc, SubBlock& data)
-{
- Address address = data.getAddress();
- assert(proc == m_node);
-
- DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
- DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
- DEBUG_EXPR(TESTER_COMP, LowPrio, address);
- DEBUG_EXPR(TESTER_COMP, LowPrio, data);
-
- if (m_status == BarrierGeneratorStatus_Test_Pending) {
- uint8 dat = data.readByte();
- uint8 lock = dat >> 7;
- if (lock == 1) {
- // Locked - keep spinning
- m_status = BarrierGeneratorStatus_Test_Waiting;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- } else {
- // Unlocked - try the swap
- m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- m_status = BarrierGeneratorStatus_Before_Swap;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- }
- } else if (m_status == BarrierGeneratorStatus_Swap_Pending) {
- m_driver.recordSwapLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- uint8 dat = data.readByte();
- uint8 lock = dat >> 7;
- if (lock == 1) {
- // We failed to aquire the lock
- m_status = BarrierGeneratorStatus_Test_Waiting;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- } else {
- // We acquired the lock
- dat = dat | 0x80;
- data.writeByte(dat);
- m_status = BarrierGeneratorStatus_StoreBarrierCounter_Waiting;
- m_last_transition = g_eventQueue_ptr->getTime();
- DEBUG_MSG(TESTER_COMP, HighPrio, "Acquired");
- DEBUG_EXPR(TESTER_COMP, HighPrio, proc);
- DEBUG_EXPR(TESTER_COMP, HighPrio, g_eventQueue_ptr->getTime());
- // g_eventQueue_ptr->scheduleEvent(this, holdTime());
-
- g_eventQueue_ptr->scheduleEvent(this, 1);
-
- // initiateLoadCtr();
- }
- } else if (m_status == BarrierGeneratorStatus_StoreBarrierCounter_Pending) {
-
- // if value == p, reset counter and set local sense flag
- uint8 ctr = data.readByte();
- //uint8 sense = ctr >> 4;
- ctr = ctr & 0x0F;
-
- ctr++;
- data.writeByte( ctr | 0x80); // store counter and lock
-
- //cout << m_node << " incremented Barrier_ctr to " << (int)ctr << ", " << data << "\n";
-
- if (ctr == (uint8) 16) {
-
- data.writeByte( 0x0 );
- m_status = BarrierGeneratorStatus_StoreFlag_Waiting;
- m_barrier_done = true;
-
- g_eventQueue_ptr->scheduleEvent(this, 1);
- }
- else {
-
- m_status = BarrierGeneratorStatus_Release_Waiting;
- g_eventQueue_ptr->scheduleEvent(this, 1);
- }
- } else if (m_status == BarrierGeneratorStatus_StoreFlag_Pending) {
-
- // write flag
- if (m_local_sense) {
- data.writeByte( 0x01 );
- }
- else {
- data.writeByte( 0x00 );
- }
-
- m_status = BarrierGeneratorStatus_Release_Waiting;
- g_eventQueue_ptr->scheduleEvent(this, 1);
-
- } else if (m_status == BarrierGeneratorStatus_Release_Pending) {
- m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- // We're releasing the lock
- uint8 dat = data.readByte();
- dat = dat & 0x7F;
- data.writeByte(dat);
-
- if (m_barrier_done) {
- m_counter++;
- proc_counter++;
- if (m_counter < g_tester_length) {
- m_status = BarrierGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
- } else {
-
- m_driver.reportDone(proc_counter, m_node);
- m_last_transition = g_eventQueue_ptr->getTime();
- }
- }
- else {
- m_status = BarrierGeneratorStatus_SpinFlag_Ready;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- }
- } else if (m_status == BarrierGeneratorStatus_SpinFlag_Pending) {
-
- uint8 sense = data.readByte();
-
-
- if (sense != m_local_sense) {
- m_status = BarrierGeneratorStatus_SpinFlag_Ready;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- }
- else {
- m_counter++;
- proc_counter++;
- if (m_counter < g_tester_length) {
- m_status = BarrierGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
- } else {
- m_driver.reportDone(proc_counter, m_node);
- m_status = BarrierGeneratorStatus_Done;
- m_last_transition = g_eventQueue_ptr->getTime();
- }
- }
-
- } else {
- WARN_EXPR(m_status);
- ERROR_MSG("Invalid status");
- }
-}
-
-int BarrierGenerator::thinkTime()
-{
- int ret;
- float ratio = g_think_fudge_factor;
-
- // return 400;
-
- if (ratio == 0) {
- return g_think_time;
- }
-
- int r = random();
- int x = (int) ( (float)g_think_time*ratio*2.0);
- int mod = r % x;
-
-
- int rand = ( mod+1 - ((float)g_think_time*ratio) );
-
- ret = (g_think_time + rand);
-
- m_total_think += ret;
- m_think_periods++;
-
- return ret;
-}
-
-int BarrierGenerator::waitTime() const
-{
- return g_wait_time;
-}
-
-
-void BarrierGenerator::initiateTest()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Test");
- sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-void BarrierGenerator::initiateSwap()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Swap");
- sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_ATOMIC, Address(2), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-void BarrierGenerator::initiateRelease()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Release");
- sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-void BarrierGenerator::initiateLoadCtr()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
- sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_LD, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-void BarrierGenerator::initiateStoreCtr()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
- sequencer()->makeRequest(CacheMsg(Address(0x40), CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-void BarrierGenerator::initiateStoreFlag()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
- sequencer()->makeRequest(CacheMsg(Address(0x00), CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-void BarrierGenerator::initiateLoadFlag()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating load of barrier counter");
- sequencer()->makeRequest(CacheMsg(Address(0x00), CacheRequestType_LD, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, 0, false));
-}
-
-
-Sequencer* BarrierGenerator::sequencer() const
-{
- return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
-}
-
-void BarrierGenerator::print(ostream& out) const
-{
-}
-
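The removed BarrierGenerator implements a centralized sense-reversing barrier: each arrival acquires a lock bit, increments a shared counter, and the last of the 16 arrivals resets the counter and flips a flag that the others spin on against their local sense. A compact software analogue of that protocol is sketched below; the class name and participant count are illustrative, and the tester drove the same scheme through Ruby memory requests rather than shared variables.

    #include <atomic>
    #include <thread>

    // Centralized sense-reversing barrier for a fixed number of participants,
    // mirroring the counter + global flag scheme of the removed tester.
    class SenseBarrier {
    public:
        explicit SenseBarrier(int n) : m_n(n), m_count(0), m_flag(false) {}

        void arrive(bool& local_sense) {
            local_sense = !local_sense;              // reverse sense each round
            if (m_count.fetch_add(1) + 1 == m_n) {
                m_count.store(0);                    // last arrival resets
                m_flag.store(local_sense);           // ...and releases everyone
            } else {
                while (m_flag.load() != local_sense) // others spin on the flag
                    std::this_thread::yield();
            }
        }

    private:
        const int         m_n;
        std::atomic<int>  m_count;
        std::atomic<bool> m_flag;
    };

Each participant keeps its own bool local_sense = false and calls arrive(local_sense) once per round, matching the m_local_sense flip performed by the tester at the start of every barrier episode.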
diff --git a/src/mem/ruby/tester/BarrierGenerator.hh b/src/mem/ruby/tester/BarrierGenerator.hh
deleted file mode 100644
index e0fa497da..000000000
--- a/src/mem/ruby/tester/BarrierGenerator.hh
+++ /dev/null
@@ -1,138 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef BARRIERGENERATOR_H
-#define BARRIERGENERATOR_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Consumer.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/common/Address.hh"
-
-class Sequencer;
-class SubBlock;
-class SyntheticDriver;
-
-
-enum BarrierGeneratorStatus {
- BarrierGeneratorStatus_FIRST,
- BarrierGeneratorStatus_Thinking = BarrierGeneratorStatus_FIRST,
- BarrierGeneratorStatus_Test_Pending,
- BarrierGeneratorStatus_Test_Waiting,
- BarrierGeneratorStatus_Before_Swap,
- BarrierGeneratorStatus_Swap_Pending,
- BarrierGeneratorStatus_Holding,
- BarrierGeneratorStatus_Release_Pending,
- BarrierGeneratorStatus_Release_Waiting,
- BarrierGeneratorStatus_StoreFlag_Waiting,
- BarrierGeneratorStatus_StoreFlag_Pending,
- BarrierGeneratorStatus_Done,
- BarrierGeneratorStatus_SpinFlag_Ready,
- BarrierGeneratorStatus_SpinFlag_Pending,
- BarrierGeneratorStatus_LoadBarrierCounter_Pending,
- BarrierGeneratorStatus_StoreBarrierCounter_Pending,
- BarrierGeneratorStatus_StoreBarrierCounter_Waiting,
- BarrierGeneratorStatus_NUM
-};
-
-
-// UNCOMMENT THIS FOR A SINGLE WORK QUEUE
-// static int m_counter;
-
-class BarrierGenerator : public Consumer {
-public:
- // Constructors
- BarrierGenerator(NodeID node, SyntheticDriver& driver);
-
- // Destructor
- ~BarrierGenerator();
-
- // Public Methods
- void wakeup();
- void performCallback(NodeID proc, SubBlock& data);
-
- void print(ostream& out) const;
-private:
- // Private Methods
- int thinkTime() ;
- int waitTime() const;
- void initiateTest();
- void initiateSwap();
- void initiateRelease();
- void initiateLoadCtr();
- void initiateStoreCtr();
- void initiateLoadFlag();
- void initiateStoreFlag();
- Sequencer* sequencer() const;
-
- // Private copy constructor and assignment operator
- BarrierGenerator(const BarrierGenerator& obj);
- BarrierGenerator& operator=(const BarrierGenerator& obj);
-
- // Data Members (m_ prefix)
- SyntheticDriver& m_driver;
- NodeID m_node;
- BarrierGeneratorStatus m_status;
- int proc_counter;
-
- int m_counter;
-
- bool m_local_sense;
- bool m_barrier_done;
-
- Time m_last_transition;
- Address m_address;
-
- int m_total_think;
- int m_think_periods;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const BarrierGenerator& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const BarrierGenerator& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //REQUESTGENERATOR_H
-
diff --git a/src/mem/ruby/tester/Check.cc b/src/mem/ruby/tester/Check.cc
deleted file mode 100644
index 7896b572a..000000000
--- a/src/mem/ruby/tester/Check.cc
+++ /dev/null
@@ -1,310 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/tester/Check.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/protocol/Chip.hh"
-#include "mem/packet.hh"
-
-Check::Check(const Address& address, const Address& pc)
-{
- m_status = TesterStatus_Idle;
-
- pickValue();
- pickInitiatingNode();
- changeAddress(address);
- m_pc = pc;
- m_access_mode = AccessModeType(random() % AccessModeType_NUM);
- m_store_count = 0;
-}
-
-void Check::initiate()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating");
- DEBUG_EXPR(TESTER_COMP, MedPrio, *this);
-
- // current CMP protocol doesn't support prefetches
- if (!Protocol::m_CMP && (random() & 0xf) == 0) { // 1 in 16 chance
- initiatePrefetch(); // Prefetch from random processor
- }
-
- if(m_status == TesterStatus_Idle) {
- initiateAction();
- } else if(m_status == TesterStatus_Ready) {
- initiateCheck();
- } else {
- // Pending - do nothing
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating action/check - failed: action/check is pending\n");
- }
-}
-
-void Check::initiatePrefetch(Sequencer* targetSequencer_ptr)
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating prefetch");
-
- CacheRequestType type;
- if ((random() & 0x7) != 0) { // 1 in 8 chance
- if ((random() & 0x1) == 0) { // 50% chance
- type = CacheRequestType_LD;
- } else {
- type = CacheRequestType_IFETCH;
- }
- } else {
- type = CacheRequestType_ST;
- }
-
- Addr data_addr = m_address.getAddress();
- Addr pc_addr = m_pc.getAddress();
- Request request(0, data_addr, 0, Flags<unsigned int>(Request::PREFETCH), pc_addr, 0, 0);
- MemCmd::Command command;
- if (type == CacheRequestType_IFETCH) {
- command = MemCmd::ReadReq;
- request.setFlags(Request::INST_FETCH);
- } else if (type == CacheRequestType_LD || type == CacheRequestType_IFETCH) {
- command = MemCmd::ReadReq;
- } else if (type == CacheRequestType_ST) {
- command = MemCmd::WriteReq;
- } else if (type == CacheRequestType_ATOMIC) {
- command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
- } else {
- panic("Cannot convert request to packet");
- }
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
-
- assert(targetSequencer_ptr != NULL);
- if (targetSequencer_ptr->isReady(&pkt)) {
- targetSequencer_ptr->makeRequest(&pkt);
- }
-}
-
-void Check::initiatePrefetch()
-{
- // Any sequencer can issue a prefetch for this address
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(random() % RubyConfig::numberOfChips())->getSequencer(random() % RubyConfig::numberOfProcsPerChip());
- assert(targetSequencer_ptr != NULL);
- initiatePrefetch(targetSequencer_ptr);
-}
-
-void Check::initiateAction()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Action");
- assert(m_status == TesterStatus_Idle);
-
- CacheRequestType type = CacheRequestType_ST;
- if ((random() & 0x1) == 0) { // 50% chance
- type = CacheRequestType_ATOMIC;
- }
-
- Addr data_addr = m_address.getAddress()+m_store_count;
- Addr pc_addr = m_pc.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), pc_addr, 0, 0);
- MemCmd::Command command;
- if (type == CacheRequestType_IFETCH) {
- command = MemCmd::ReadReq;
- request.setFlags(Request::INST_FETCH);
- } else if (type == CacheRequestType_LD || type == CacheRequestType_IFETCH) {
- command = MemCmd::ReadReq;
- } else if (type == CacheRequestType_ST) {
- command = MemCmd::WriteReq;
- } else if (type == CacheRequestType_ATOMIC) {
- command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
- } else {
- panic("Cannot convert request to packet");
- }
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
-
- Sequencer* sequencer_ptr = initiatingSequencer();
- if (sequencer_ptr->isReady(&pkt) == false) {
- DEBUG_MSG(TESTER_COMP, MedPrio, "failed to initiate action - sequencer not ready\n");
- } else {
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating action - successful\n");
- DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
- m_status = TesterStatus_Action_Pending;
-
- sequencer_ptr->makeRequest(&pkt);
- }
- DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
-}
-
-void Check::initiateCheck()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Check");
- assert(m_status == TesterStatus_Ready);
-
- CacheRequestType type = CacheRequestType_LD;
- if ((random() & 0x1) == 0) { // 50% chance
- type = CacheRequestType_IFETCH;
- }
-
-
- Addr data_addr = m_address.getAddress()+m_store_count;
- Addr pc_addr = m_pc.getAddress();
- Request request(0, data_addr, CHECK_SIZE, Flags<unsigned int>(), pc_addr, 0, 0);
- MemCmd::Command command;
- if (type == CacheRequestType_IFETCH) {
- command = MemCmd::ReadReq;
- request.setFlags(Request::INST_FETCH);
- } else if (type == CacheRequestType_LD || type == CacheRequestType_IFETCH) {
- command = MemCmd::ReadReq;
- } else if (type == CacheRequestType_ST) {
- command = MemCmd::WriteReq;
- } else if (type == CacheRequestType_ATOMIC) {
- command = MemCmd::SwapReq; // TODO -- differentiate between atomic types
- } else {
- panic("Cannot convert request to packet");
- }
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
-
- Sequencer* sequencer_ptr = initiatingSequencer();
- if (sequencer_ptr->isReady(&pkt) == false) {
- DEBUG_MSG(TESTER_COMP, MedPrio, "failed to initiate check - sequencer not ready\n");
- } else {
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating check - successful\n");
- DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
- m_status = TesterStatus_Check_Pending;
-
- sequencer_ptr->makeRequest(&pkt);
- }
- DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
-}
-
-void Check::performCallback(NodeID proc, SubBlock& data)
-{
- Address address = data.getAddress();
- // assert(getAddress() == address); // This isn't exactly right since we now have multi-byte checks
- assert(getAddress().getLineAddress() == address.getLineAddress());
-
- DEBUG_MSG(TESTER_COMP, MedPrio, "Callback");
- DEBUG_EXPR(TESTER_COMP, MedPrio, *this);
-
- if (m_status == TesterStatus_Action_Pending) {
- DEBUG_MSG(TESTER_COMP, MedPrio, "Action callback");
- // Perform store
- data.setByte(0, m_value+m_store_count); // We store one byte at a time
- m_store_count++;
-
- if (m_store_count == CHECK_SIZE) {
- m_status = TesterStatus_Ready;
- } else {
- m_status = TesterStatus_Idle;
- }
- } else if (m_status == TesterStatus_Check_Pending) {
- DEBUG_MSG(TESTER_COMP, MedPrio, "Check callback");
- // Perform load/check
- for(int byte_number=0; byte_number<CHECK_SIZE; byte_number++) {
- if (uint8(m_value+byte_number) != data.getByte(byte_number) && (DATA_BLOCK == true)) {
- WARN_EXPR(proc);
- WARN_EXPR(address);
- WARN_EXPR(data);
- WARN_EXPR(byte_number);
- WARN_EXPR((int)m_value+byte_number);
- WARN_EXPR((int)data.getByte(byte_number));
- WARN_EXPR(*this);
- WARN_EXPR(g_eventQueue_ptr->getTime());
- ERROR_MSG("Action/check failure");
- }
- }
- DEBUG_MSG(TESTER_COMP, HighPrio, "Action/check success:");
- DEBUG_EXPR(TESTER_COMP, HighPrio, *this);
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
-
- m_status = TesterStatus_Idle;
- pickValue();
-
- } else {
- WARN_EXPR(*this);
- WARN_EXPR(proc);
- WARN_EXPR(data);
- WARN_EXPR(m_status);
- WARN_EXPR(g_eventQueue_ptr->getTime());
- ERROR_MSG("Unexpected TesterStatus");
- }
-
- DEBUG_EXPR(TESTER_COMP, MedPrio, proc);
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
- DEBUG_EXPR(TESTER_COMP, MedPrio, getAddress().getLineAddress());
- DEBUG_MSG(TESTER_COMP, MedPrio, "Callback done");
- DEBUG_EXPR(TESTER_COMP, MedPrio, *this);
-}
-
-void Check::changeAddress(const Address& address)
-{
- assert((m_status == TesterStatus_Idle) || (m_status == TesterStatus_Ready));
- m_status = TesterStatus_Idle;
- m_address = address;
- m_store_count = 0;
-}
-
-Sequencer* Check::initiatingSequencer() const
-{
- return g_system_ptr->getChip(m_initiatingNode/RubyConfig::numberOfProcsPerChip())->getSequencer(m_initiatingNode%RubyConfig::numberOfProcsPerChip());
-}
-
-void Check::pickValue()
-{
- assert(m_status == TesterStatus_Idle);
- m_status = TesterStatus_Idle;
- // DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
- DEBUG_MSG(TESTER_COMP, MedPrio, *this);
- m_value = random() & 0xff; // One byte
- // DEBUG_MSG(TESTER_COMP, MedPrio, m_value);
- DEBUG_MSG(TESTER_COMP, MedPrio, *this);
- m_store_count = 0;
-}
-
-void Check::pickInitiatingNode()
-{
- assert((m_status == TesterStatus_Idle) || (m_status == TesterStatus_Ready));
- m_status = TesterStatus_Idle;
- DEBUG_MSG(TESTER_COMP, MedPrio, m_status);
- m_initiatingNode = (random() % RubyConfig::numberOfProcessors());
- DEBUG_MSG(TESTER_COMP, MedPrio, m_initiatingNode);
- m_store_count = 0;
-}
-
-void Check::print(ostream& out) const
-{
- out << "["
- << m_address << ", value: "
- << (int) m_value << ", status: "
- << m_status << ", initiating node: "
- << m_initiatingNode << ", store_count: "
- << m_store_count
- << "]" << flush;
-}
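The deleted Check logic above reduces to one invariant: an action stores byte value+i to address+i, one byte per callback, and a later check loads all CHECK_SIZE bytes back and requires each to equal value+i. A minimal standalone sketch of that invariant (CHECK_SIZE and the value+offset pattern come from the deleted code; everything else is illustrative):

// Sketch of the store-then-verify invariant used by the deleted Check class.
#include <cassert>
#include <cstdint>

const int CHECK_SIZE_BITS = 2;
const int CHECK_SIZE = 1 << CHECK_SIZE_BITS;      // 4 bytes per check, as in Check.hh

struct MiniCheck {
    uint64_t address;      // base address of the check
    uint8_t  value;        // value picked when the check last went idle
    int      store_count;  // bytes written by actions so far

    // One "action" callback: write a single byte, as performCallback() does.
    void action(uint8_t* memory) {
        memory[address + store_count] = uint8_t(value + store_count);
        store_count++;
    }
    // One "check" callback: every byte must equal value + offset.
    void check(const uint8_t* memory) const {
        for (int i = 0; i < CHECK_SIZE; i++)
            assert(memory[address + i] == uint8_t(value + i) && "Action/check failure");
    }
};

int main() {
    uint8_t memory[64] = {};
    MiniCheck c = {8, 0x42, 0};
    for (int i = 0; i < CHECK_SIZE; i++) c.action(memory);  // four single-byte stores
    c.check(memory);                                        // verifies 0x42, 0x43, 0x44, 0x45
    return 0;
}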
diff --git a/src/mem/ruby/tester/Check.hh b/src/mem/ruby/tester/Check.hh
deleted file mode 100644
index 8f08b3f40..000000000
--- a/src/mem/ruby/tester/Check.hh
+++ /dev/null
@@ -1,107 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef CHECK_H
-#define CHECK_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/protocol/TesterStatus.hh"
-#include "mem/protocol/AccessModeType.hh"
-class Sequencer;
-class SubBlock;
-
-const int CHECK_SIZE_BITS = 2;
-const int CHECK_SIZE = (1<<CHECK_SIZE_BITS);
-
-class Check {
-public:
- // Constructors
- Check(const Address& address, const Address& pc);
-
- // Default Destructor
- //~Check();
-
- // Public Methods
-
- void initiate(); // Does Action or Check or neither
- void performCallback(NodeID proc, SubBlock& data);
- const Address& getAddress() { return m_address; }
- void changeAddress(const Address& address);
-
- void print(ostream& out) const;
-private:
- // Private Methods
- void initiatePrefetch(Sequencer* targetSequencer_ptr);
- void initiatePrefetch();
- void initiateAction();
- void initiateCheck();
-
- Sequencer* initiatingSequencer() const;
-
- void pickValue();
- void pickInitiatingNode();
-
- // Using default copy constructor and assignment operator
- // Check(const Check& obj);
- // Check& operator=(const Check& obj);
-
- // Data Members (m_ prefix)
- TesterStatus m_status;
- uint8 m_value;
- int m_store_count;
- NodeID m_initiatingNode;
- Address m_address;
- Address m_pc;
- AccessModeType m_access_mode;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const Check& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const Check& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //CHECK_H
diff --git a/src/mem/ruby/tester/CheckTable.cc b/src/mem/ruby/tester/CheckTable.cc
deleted file mode 100644
index b8e57a646..000000000
--- a/src/mem/ruby/tester/CheckTable.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/tester/CheckTable.hh"
-#include "mem/ruby/tester/Check.hh"
-#include "mem/gems_common/Map.hh"
-
-CheckTable::CheckTable()
-{
- m_lookup_map_ptr = new Map<Address, Check*>;
- physical_address_t physical = 0;
- Address address;
-
- const int size1 = 32;
- const int size2 = 100;
-
- // The first set is to get some false sharing
- physical = 1000;
- for (int i=0; i<size1; i++) {
- // Setup linear addresses
- address.setAddress(physical);
- addCheck(address);
- physical += CHECK_SIZE;
- }
-
- // The next two sets are to get some limited false sharing and cache conflicts
- physical = 1000;
- for (int i=0; i<size2; i++) {
- // Setup linear addresses
- address.setAddress(physical);
- addCheck(address);
- physical += 256;
- }
-
- physical = 1000 + CHECK_SIZE;
- for (int i=0; i<size2; i++) {
- // Setup linear addresses
- address.setAddress(physical);
- addCheck(address);
- physical += 256;
- }
-}
-
-CheckTable::~CheckTable()
-{
- int size = m_check_vector.size();
- for (int i=0; i<size; i++) {
- delete m_check_vector[i];
- }
- delete m_lookup_map_ptr;
-}
-
-void CheckTable::addCheck(const Address& address)
-{
- if (log_int(CHECK_SIZE) != 0) {
- if (address.bitSelect(0,CHECK_SIZE_BITS-1) != 0) {
- ERROR_MSG("Check not aligned");
- }
- }
-
- for (int i=0; i<CHECK_SIZE; i++) {
- if (m_lookup_map_ptr->exist(Address(address.getAddress()+i))) {
- // A mapping for this byte already existed, discard the entire check
- return;
- }
- }
-
- Check* check_ptr = new Check(address, Address(100+m_check_vector.size()));
- for (int i=0; i<CHECK_SIZE; i++) {
- // Insert it once per byte
- m_lookup_map_ptr->add(Address(address.getAddress()+i), check_ptr);
- }
- m_check_vector.insertAtBottom(check_ptr);
-}
-
-Check* CheckTable::getRandomCheck()
-{
- return m_check_vector[random() % m_check_vector.size()];
-}
-
-Check* CheckTable::getCheck(const Address& address)
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "Looking for check by address");
- DEBUG_EXPR(TESTER_COMP, MedPrio, address);
-
- if (m_lookup_map_ptr->exist(address)) {
- Check* check = m_lookup_map_ptr->lookup(address);
- assert(check != NULL);
- return check;
- } else {
- return NULL;
- }
-}
-
-void CheckTable::print(ostream& out) const
-{
-}
diff --git a/src/mem/ruby/tester/DetermGETXGenerator.cc b/src/mem/ruby/tester/DetermGETXGenerator.cc
index e4d8addd2..6692fb80c 100644
--- a/src/mem/ruby/tester/DetermGETXGenerator.cc
+++ b/src/mem/ruby/tester/DetermGETXGenerator.cc
@@ -37,26 +37,23 @@
#include "mem/ruby/tester/DetermGETXGenerator.hh"
#include "mem/protocol/DetermGETXGeneratorStatus.hh"
-#include "mem/protocol/LockStatus.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/tester/DeterministicDriver.hh"
-#include "mem/protocol/Chip.hh"
-#include "mem/packet.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
+#include "mem/ruby/tester/SpecifiedGenerator.hh"
+//#include "DMAController.hh"
+#include "mem/ruby/libruby.hh"
-DetermGETXGenerator::DetermGETXGenerator(NodeID node, DeterministicDriver& driver) :
- m_driver(driver)
+
+DetermGETXGenerator::DetermGETXGenerator(NodeID node, DeterministicDriver * driver)
{
m_status = DetermGETXGeneratorStatus_Thinking;
m_last_transition = 0;
m_node = node;
m_address = Address(9999); // initialize to null value
m_counter = 0;
-
+ parent_driver = driver;
// don't know exactly when this node needs to request so just guess randomly
- g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+ parent_driver->eventQueue->scheduleEvent(this, 1+(random() % 200));
}
DetermGETXGenerator::~DetermGETXGenerator()
@@ -70,13 +67,13 @@ void DetermGETXGenerator::wakeup()
// determine if this node is next for the GETX round robin request
if (m_status == DetermGETXGeneratorStatus_Thinking) {
- if (m_driver.isStoreReady(m_node)) {
+ if (parent_driver->isStoreReady(m_node)) {
pickAddress();
m_status = DetermGETXGeneratorStatus_Store_Pending; // Store Pending
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = parent_driver->eventQueue->getTime();
initiateStore(); // GETX
} else { // I'll check again later
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ parent_driver->eventQueue->scheduleEvent(this, thinkTime());
}
} else {
WARN_EXPR(m_status);
@@ -85,31 +82,28 @@ void DetermGETXGenerator::wakeup()
}
-void DetermGETXGenerator::performCallback(NodeID proc, SubBlock& data)
+void DetermGETXGenerator::performCallback(NodeID proc, Address address)
{
- Address address = data.getAddress();
assert(proc == m_node);
assert(address == m_address);
DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
DEBUG_EXPR(TESTER_COMP, LowPrio, address);
- DEBUG_EXPR(TESTER_COMP, LowPrio, data);
if (m_status == DetermGETXGeneratorStatus_Store_Pending) {
- m_driver.recordStoreLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- data.writeByte(m_node);
- m_driver.storeCompleted(m_node, data.getAddress()); // advance the store queue
+ parent_driver->recordStoreLatency(parent_driver->eventQueue->getTime() - m_last_transition);
+ parent_driver->storeCompleted(m_node, address); // advance the store queue
m_counter++;
- if (m_counter < g_tester_length) {
+ if (m_counter < parent_driver->m_tester_length) {
m_status = DetermGETXGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_last_transition = parent_driver->eventQueue->getTime();
+ parent_driver->eventQueue->scheduleEvent(this, waitTime());
} else {
- m_driver.reportDone();
+ parent_driver->reportDone();
m_status = DetermGETXGeneratorStatus_Done;
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = parent_driver->eventQueue->getTime();
}
} else {
@@ -120,38 +114,40 @@ void DetermGETXGenerator::performCallback(NodeID proc, SubBlock& data)
int DetermGETXGenerator::thinkTime() const
{
- return g_think_time;
+ return parent_driver->m_think_time;
}
int DetermGETXGenerator::waitTime() const
{
- return g_wait_time;
+ return parent_driver->m_wait_time;
}
void DetermGETXGenerator::pickAddress()
{
assert(m_status == DetermGETXGeneratorStatus_Thinking);
- m_address = m_driver.getNextStoreAddr(m_node);
+ m_address = parent_driver->getNextStoreAddr(m_node);
}
void DetermGETXGenerator::initiateStore()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Store");
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
- MemCmd::Command command;
- command = MemCmd::WriteReq;
+ uint8_t *write_data = new uint8_t[64];
+ for(int i=0; i < 64; i++) {
+ write_data[i] = m_node;
+ }
+
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_node);
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(m_address.getAddress(), write_data, 64, 0, RubyRequestType_ST, RubyAccessMode_Supervisor));
- sequencer()->makeRequest(&pkt);
-}
+ // delete [] write_data;
-Sequencer* DetermGETXGenerator::sequencer() const
-{
- return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+ ASSERT(parent_driver->requests.find(request_id) == parent_driver->requests.end());
+ parent_driver->requests.insert(make_pair(request_id, make_pair(m_node, m_address)));
}
void DetermGETXGenerator::print(ostream& out) const
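The rewritten initiateStore() above is the pattern every generator in this patch now follows: build the per-node port name ("Sequencer_<node>"), issue a RubyRequest through libruby, and record the returned request id so the driver's hitCallback() can route the completion back to the issuing generator. A sketch of that pattern, using only the libruby calls and the RubyRequest arguments visible in this diff (the helper name and the free-function form are illustrative; the 64-byte buffer matches DATA_BLOCK_BYTES, and buffer ownership is left as in the patch, where the delete is commented out):

// Sketch of the request-issue pattern used by the rewritten generators; illustrative only.
// In the patch this code lives in a generator that is a friend of the driver.
#include "mem/ruby/libruby.hh"   // libruby_get_port_by_name, libruby_issue_request, RubyRequest
#include <cstdio>
#include <cstring>

int64_t issue_store(DeterministicDriver* driver, NodeID node, const Address& addr, uint8_t value)
{
    uint8_t* data = new uint8_t[64];            // DATA_BLOCK_BYTES; never freed here, as in the patch
    memset(data, value, 64);

    char port_name[13];
    sprintf(port_name, "Sequencer_%d", node);   // one libruby port per tester node

    int64_t id = libruby_issue_request(libruby_get_port_by_name(port_name),
        RubyRequest(addr.getAddress(), data, 64, 0,
                    RubyRequestType_ST, RubyAccessMode_Supervisor));

    // hitCallback() later looks the id up here to find (node, address).
    ASSERT(driver->requests.find(id) == driver->requests.end());
    driver->requests.insert(make_pair(id, make_pair(node, addr)));
    return id;
}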
diff --git a/src/mem/ruby/tester/DetermGETXGenerator.hh b/src/mem/ruby/tester/DetermGETXGenerator.hh
index 1f5b67653..82e616e4b 100644
--- a/src/mem/ruby/tester/DetermGETXGenerator.hh
+++ b/src/mem/ruby/tester/DetermGETXGenerator.hh
@@ -40,28 +40,26 @@
#ifndef DETERMGETXGENERATOR_H
#define DETERMGETXGENERATOR_H
-#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/protocol/DetermGETXGeneratorStatus.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/common/Address.hh"
+#include "Address_Tester.hh"
#include "mem/ruby/tester/SpecifiedGenerator.hh"
-class Sequencer;
-class SubBlock;
class DeterministicDriver;
+class DMAController;
class DetermGETXGenerator : public SpecifiedGenerator {
public:
// Constructors
- DetermGETXGenerator(NodeID node, DeterministicDriver& driver);
+ DetermGETXGenerator(NodeID node, DeterministicDriver * driver);
// Destructor
~DetermGETXGenerator();
// Public Methods
void wakeup();
- void performCallback(NodeID proc, SubBlock& data);
+ void performCallback(NodeID proc, Address address);
void print(ostream& out) const;
private:
@@ -69,20 +67,21 @@ private:
int thinkTime() const;
int waitTime() const;
void initiateStore();
+ void initiateDMA();
void pickAddress();
- Sequencer* sequencer() const;
+ DMAController* dma() const;
// copy constructor and assignment operator
DetermGETXGenerator(const DetermGETXGenerator& obj);
DetermGETXGenerator& operator=(const DetermGETXGenerator& obj);
+ DeterministicDriver * parent_driver;
// Data Members (m_ prefix)
DetermGETXGeneratorStatus m_status;
int m_counter;
Address m_address;
NodeID m_node;
- DeterministicDriver& m_driver;
Time m_last_transition;
};
diff --git a/src/mem/ruby/tester/DetermInvGenerator.cc b/src/mem/ruby/tester/DetermInvGenerator.cc
index bafaa18ae..eebe18057 100644
--- a/src/mem/ruby/tester/DetermInvGenerator.cc
+++ b/src/mem/ruby/tester/DetermInvGenerator.cc
@@ -38,13 +38,10 @@
#include "mem/ruby/tester/DetermInvGenerator.hh"
#include "mem/protocol/DetermInvGeneratorStatus.hh"
-#include "mem/protocol/LockStatus.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/tester/DeterministicDriver.hh"
-#include "mem/protocol/Chip.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
+//#include "DMAController.hh"
+#include "mem/ruby/libruby.hh"
DetermInvGenerator::DetermInvGenerator(NodeID node, DeterministicDriver& driver) :
m_driver(driver)
@@ -54,9 +51,8 @@ DetermInvGenerator::DetermInvGenerator(NodeID node, DeterministicDriver& driver)
m_node = node;
m_address = Address(9999); // initiate to a NULL value
m_counter = 0;
-
// don't know exactly when this node needs to request so just guess randomly
- g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+ m_driver.eventQueue->scheduleEvent(this, 1+(random() % 200));
}
DetermInvGenerator::~DetermInvGenerator()
@@ -74,23 +70,23 @@ void DetermInvGenerator::wakeup()
if (m_driver.isLoadReady(m_node) && m_counter == m_driver.getStoresCompleted()) {
pickLoadAddress();
m_status = DetermInvGeneratorStatus_Load_Pending; // Load Pending
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = m_driver.eventQueue->getTime();
initiateLoad(); // GETS
} else { // I'll check again later
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_driver.eventQueue->scheduleEvent(this, thinkTime());
}
} else if (m_status == DetermInvGeneratorStatus_Load_Complete) {
if (m_driver.isStoreReady(m_node, m_address)) { // do a store in this transaction or start the next one
if (m_driver.isLoadReady((0), m_address)) { // everyone is in S for this address i.e. back to node 0
m_status = DetermInvGeneratorStatus_Store_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = m_driver.eventQueue->getTime();
initiateStore(); // GETX
} else { // I'm next, I just have to wait for all loads to complete
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_driver.eventQueue->scheduleEvent(this, thinkTime());
}
} else { // I'm not next to store, go back to thinking
m_status = DetermInvGeneratorStatus_Thinking;
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_driver.eventQueue->scheduleEvent(this, thinkTime());
}
} else {
WARN_EXPR(m_status);
@@ -99,48 +95,48 @@ void DetermInvGenerator::wakeup()
}
-void DetermInvGenerator::performCallback(NodeID proc, SubBlock& data)
+void DetermInvGenerator::performCallback(NodeID proc, Address address)
{
- Address address = data.getAddress();
assert(proc == m_node);
assert(address == m_address);
DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
DEBUG_EXPR(TESTER_COMP, LowPrio, address);
- DEBUG_EXPR(TESTER_COMP, LowPrio, data);
if (m_status == DetermInvGeneratorStatus_Load_Pending) {
- m_driver.recordLoadLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- m_driver.loadCompleted(m_node, data.getAddress());
+ m_driver.recordLoadLatency(m_driver.eventQueue->getTime() - m_last_transition);
+ //NodeID firstByte = data.readByte(); // dummy read
+
+ m_driver.loadCompleted(m_node, address);
if (!m_driver.isStoreReady(m_node, m_address)) { // if we don't have to store, we are done for this transaction
m_counter++;
}
- if (m_counter < g_tester_length) {
+ if (m_counter < m_driver.m_tester_length) {
m_status = DetermInvGeneratorStatus_Load_Complete;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_last_transition = m_driver.eventQueue->getTime();
+ m_driver.eventQueue->scheduleEvent(this, waitTime());
} else {
m_driver.reportDone();
m_status = DetermInvGeneratorStatus_Done;
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = m_driver.eventQueue->getTime();
}
} else if (m_status == DetermInvGeneratorStatus_Store_Pending) {
- m_driver.recordStoreLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- data.writeByte(m_node);
- m_driver.storeCompleted(m_node, data.getAddress()); // advance the store queue
+ m_driver.recordStoreLatency(m_driver.eventQueue->getTime() - m_last_transition);
+ //data.writeByte(m_node);
+ m_driver.storeCompleted(m_node, address); // advance the store queue
m_counter++;
- if (m_counter < g_tester_length) {
+ if (m_counter < m_driver.m_tester_length) {
m_status = DetermInvGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_last_transition = m_driver.eventQueue->getTime();
+ m_driver.eventQueue->scheduleEvent(this, waitTime());
} else {
m_driver.reportDone();
m_status = DetermInvGeneratorStatus_Done;
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = m_driver.eventQueue->getTime();
}
} else {
WARN_EXPR(m_status);
@@ -150,23 +146,22 @@ void DetermInvGenerator::performCallback(NodeID proc, SubBlock& data)
DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
DEBUG_EXPR(TESTER_COMP, LowPrio, address);
- DEBUG_EXPR(TESTER_COMP, LowPrio, data);
}
int DetermInvGenerator::thinkTime() const
{
- return g_think_time;
+ return m_driver.m_think_time;
}
int DetermInvGenerator::waitTime() const
{
- return g_wait_time;
+ return m_driver.m_wait_time;
}
int DetermInvGenerator::holdTime() const
{
- return g_hold_time;
+ assert(0);
}
void DetermInvGenerator::pickLoadAddress()
@@ -179,35 +174,42 @@ void DetermInvGenerator::pickLoadAddress()
void DetermInvGenerator::initiateLoad()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Load");
+ // sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_LD, Address(1), AccessModeType_UserMode, 1, PrefetchBit_No, Address(0), 0 /* only 1 SMT thread */));
+ uint8_t * read_data = new uint8_t[64];
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 1, 0, 0);
- MemCmd::Command command;
- command = MemCmd::ReadReq;
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_node);
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(m_address.getAddress(), read_data, 64, 0, RubyRequestType_LD, RubyAccessMode_Supervisor));
- sequencer()->makeRequest(&pkt);
+ //delete [] read_data;
+
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_node, m_address)));
}
void DetermInvGenerator::initiateStore()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Store");
+ // sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_ST, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, Address(0), 0 /* only 1 SMT thread */));
+ uint8_t *write_data = new uint8_t[64];
+ for(int i=0; i < 64; i++) {
+ write_data[i] = m_node;
+ }
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
- MemCmd::Command command;
- command = MemCmd::WriteReq;
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_node);
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(m_address.getAddress(), write_data, 64, 0, RubyRequestType_ST, RubyAccessMode_Supervisor));
- sequencer()->makeRequest(&pkt);
-}
+ //delete [] write_data;
+
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_node, m_address)));
-Sequencer* DetermInvGenerator::sequencer() const
-{
- return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
}
void DetermInvGenerator::print(ostream& out) const
diff --git a/src/mem/ruby/tester/DetermInvGenerator.hh b/src/mem/ruby/tester/DetermInvGenerator.hh
index 4f0712fbe..6127c3af4 100644
--- a/src/mem/ruby/tester/DetermInvGenerator.hh
+++ b/src/mem/ruby/tester/DetermInvGenerator.hh
@@ -41,15 +41,12 @@
#ifndef DETERMINVGENERATOR_H
#define DETERMINVGENERATOR_H
-#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/protocol/DetermInvGeneratorStatus.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/common/Address.hh"
+#include "Address_Tester.hh"
#include "mem/ruby/tester/SpecifiedGenerator.hh"
-class Sequencer;
-class SubBlock;
class DeterministicDriver;
class DetermInvGenerator : public SpecifiedGenerator {
@@ -62,7 +59,7 @@ public:
// Public Methods
void wakeup();
- void performCallback(NodeID proc, SubBlock& data);
+ void performCallback(NodeID proc, Address address);
void print(ostream& out) const;
private:
@@ -75,8 +72,6 @@ private:
void pickLoadAddress();
void pickStoreAddress();
- Sequencer* sequencer() const;
-
// copy constructor and assignment operator
DetermInvGenerator(const DetermInvGenerator& obj);
DetermInvGenerator& operator=(const DetermInvGenerator& obj);
diff --git a/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc b/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc
index 5adc7aa5c..38688f10d 100644
--- a/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc
+++ b/src/mem/ruby/tester/DetermSeriesGETSGenerator.cc
@@ -34,13 +34,7 @@
#include "mem/ruby/tester/DetermSeriesGETSGenerator.hh"
#include "mem/protocol/DetermSeriesGETSGeneratorStatus.hh"
-#include "mem/protocol/LockStatus.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/tester/DeterministicDriver.hh"
-#include "mem/protocol/Chip.hh"
DetermSeriesGETSGenerator::DetermSeriesGETSGenerator(NodeID node, DeterministicDriver& driver) :
m_driver(driver)
@@ -51,8 +45,9 @@ DetermSeriesGETSGenerator::DetermSeriesGETSGenerator(NodeID node, DeterministicD
m_address = Address(9999); // initialize to null value
m_counter = 0;
+
// don't know exactly when this node needs to request so just guess randomly
- g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
+ m_driver.eventQueue->scheduleEvent(this, 1+(random() % 200));
}
DetermSeriesGETSGenerator::~DetermSeriesGETSGenerator()
@@ -69,10 +64,10 @@ void DetermSeriesGETSGenerator::wakeup()
if (m_driver.isLoadReady(m_node)) {
pickAddress();
m_status = DetermSeriesGETSGeneratorStatus_Load_Pending; // Load Pending
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = m_driver.eventQueue->getTime();
initiateLoad(); // SeriesGETS
} else { // I'll check again later
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
+ m_driver.eventQueue->scheduleEvent(this, thinkTime());
}
} else {
WARN_EXPR(m_status);
@@ -81,32 +76,30 @@ void DetermSeriesGETSGenerator::wakeup()
}
-void DetermSeriesGETSGenerator::performCallback(NodeID proc, SubBlock& data)
+void DetermSeriesGETSGenerator::performCallback(NodeID proc, Address address)
{
- Address address = data.getAddress();
assert(proc == m_node);
assert(address == m_address);
DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
DEBUG_EXPR(TESTER_COMP, LowPrio, address);
- DEBUG_EXPR(TESTER_COMP, LowPrio, data);
if (m_status == DetermSeriesGETSGeneratorStatus_Load_Pending) {
- m_driver.recordLoadLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- data.writeByte(m_node);
- m_driver.loadCompleted(m_node, data.getAddress()); // advance the load queue
+ m_driver.recordLoadLatency(m_driver.eventQueue->getTime() - m_last_transition);
+ //data.writeByte(m_node);
+ m_driver.loadCompleted(m_node, address); // advance the load queue
m_counter++;
// do we still have more requests to complete before the next proc starts?
- if (m_counter < g_tester_length*g_NUM_COMPLETIONS_BEFORE_PASS) {
+ if (m_counter < m_driver.m_tester_length*m_driver.m_numCompletionsPerNode) {
m_status = DetermSeriesGETSGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
+ m_last_transition = m_driver.eventQueue->getTime();
+ m_driver.eventQueue->scheduleEvent(this, waitTime());
} else {
m_driver.reportDone();
m_status = DetermSeriesGETSGeneratorStatus_Done;
- m_last_transition = g_eventQueue_ptr->getTime();
+ m_last_transition = m_driver.eventQueue->getTime();
}
} else {
@@ -117,12 +110,12 @@ void DetermSeriesGETSGenerator::performCallback(NodeID proc, SubBlock& data)
int DetermSeriesGETSGenerator::thinkTime() const
{
- return g_think_time;
+ return m_driver.m_think_time;
}
int DetermSeriesGETSGenerator::waitTime() const
{
- return g_wait_time;
+ return m_driver.m_wait_time;
}
void DetermSeriesGETSGenerator::pickAddress()
@@ -135,21 +128,20 @@ void DetermSeriesGETSGenerator::pickAddress()
void DetermSeriesGETSGenerator::initiateLoad()
{
DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Load");
+ //sequencer()->makeRequest(CacheMsg(m_address, m_address, CacheRequestType_IFETCH, Address(3), AccessModeType_UserMode, 1, PrefetchBit_No, Address(0), 0 /* only 1 SMT thread */));
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
- MemCmd::Command command;
- command = MemCmd::ReadReq;
- request.setFlags(Request::INST_FETCH);
+ uint8_t *read_data = new uint8_t[64];
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_node);
- sequencer()->makeRequest(&pkt);
-}
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(m_address.getAddress(), read_data, 64, 0, RubyRequestType_LD, RubyAccessMode_Supervisor));
-Sequencer* DetermSeriesGETSGenerator::sequencer() const
-{
- return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
+ //delete [] read_data;
+
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_node, m_address)));
}
void DetermSeriesGETSGenerator::print(ostream& out) const
diff --git a/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh b/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh
index 1e44dc3bc..225e45a11 100644
--- a/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh
+++ b/src/mem/ruby/tester/DetermSeriesGETSGenerator.hh
@@ -42,15 +42,12 @@
#ifndef DETERMSERIESGETSGENERATOR_H
#define DETERMSERIESGETSGENERATOR_H
-#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/protocol/DetermSeriesGETSGeneratorStatus.hh"
-#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/common/Address.hh"
+#include "Address_Tester.hh"
#include "mem/ruby/tester/SpecifiedGenerator.hh"
-class Sequencer;
-class SubBlock;
class DeterministicDriver;
class DetermSeriesGETSGenerator : public SpecifiedGenerator {
@@ -63,7 +60,7 @@ public:
// Public Methods
void wakeup();
- void performCallback(NodeID proc, SubBlock& data);
+ void performCallback(NodeID proc, Address address);
void print(ostream& out) const;
private:
@@ -73,8 +70,6 @@ private:
void initiateLoad();
void pickAddress();
- Sequencer* sequencer() const;
-
// copy constructor and assignment operator
DetermSeriesGETSGenerator(const DetermSeriesGETSGenerator& obj);
DetermSeriesGETSGenerator& operator=(const DetermSeriesGETSGenerator& obj);
diff --git a/src/mem/ruby/tester/DeterministicDriver.cc b/src/mem/ruby/tester/DeterministicDriver.cc
index 762672118..54b5f5e0d 100644
--- a/src/mem/ruby/tester/DeterministicDriver.cc
+++ b/src/mem/ruby/tester/DeterministicDriver.cc
@@ -32,67 +32,79 @@
*
*/
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/System.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/tester/DeterministicDriver.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/ruby/tester/SpecifiedGenerator.hh"
+#include "mem/ruby/tester/EventQueue_Tester.hh"
+//#include "DMAGenerator.hh"
#include "mem/ruby/tester/DetermGETXGenerator.hh"
-#include "mem/ruby/tester/DetermInvGenerator.hh"
-#include "mem/ruby/tester/DetermSeriesGETSGenerator.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/protocol/Chip.hh"
-#include "mem/packet.hh"
-DeterministicDriver::DeterministicDriver(RubySystem* sys_ptr)
+#define DATA_BLOCK_BYTES 64
+
+DeterministicDriver::DeterministicDriver(string generator_type, int num_completions, int num_procs, Time g_think_time, Time g_wait_time, int g_tester_length)
{
+ eventQueue = new RubyEventQueue;
m_finish_time = 0;
m_last_issue = -11;
m_done_counter = 0;
m_loads_completed = 0;
m_stores_completed = 0;
- m_numCompletionsPerNode = g_NUM_COMPLETIONS_BEFORE_PASS;
+ m_numCompletionsPerNode = num_completions;
+ m_num_procs = num_procs;
+ m_think_time = g_think_time;
+ m_wait_time = g_wait_time;
+ m_tester_length = g_tester_length;
+
- m_last_progress_vector.setSize(RubyConfig::numberOfProcessors());
+ m_last_progress_vector.setSize(num_procs);
for (int i=0; i<m_last_progress_vector.size(); i++) {
m_last_progress_vector[i] = 0;
}
- m_load_vector.setSize(g_deterministic_addrs);
+ m_load_vector.setSize(10);
for (int i=0; i<m_load_vector.size(); i++) {
m_load_vector[i] = -1; // No processor last held it
}
- m_store_vector.setSize(g_deterministic_addrs);
+ m_store_vector.setSize(10);
for (int i=0; i<m_store_vector.size(); i++) {
m_store_vector[i] = -1; // No processor last held it
}
- m_generator_vector.setSize(RubyConfig::numberOfProcessors());
+ m_generator_vector.setSize(num_procs);
- SpecifiedGeneratorType generator = string_to_SpecifiedGeneratorType(g_SpecifiedGenerator);
+ int generator = string_to_SpecifiedGeneratorType(generator_type);
for (int i=0; i<m_generator_vector.size(); i++) {
switch (generator) {
case SpecifiedGeneratorType_DetermGETXGenerator:
- m_generator_vector[i] = new DetermGETXGenerator(i, *this);
- break;
- case SpecifiedGeneratorType_DetermSeriesGETSGenerator:
- m_generator_vector[i] = new DetermSeriesGETSGenerator(i, *this);
+ m_generator_vector[i] = new DetermGETXGenerator(i, this);
break;
case SpecifiedGeneratorType_DetermInvGenerator:
m_generator_vector[i] = new DetermInvGenerator(i, *this);
break;
+ case SpecifiedGeneratorType_DetermSeriesGETSGenerator:
+ m_generator_vector[i] = new DetermSeriesGETSGenerator(i, *this);
+ break;
default:
ERROR_MSG("Unexpected specified generator type");
}
}
- // add the tester consumer to the global event queue
- g_eventQueue_ptr->scheduleEvent(this, 1);
+ //m_dma_generator = new DMAGenerator(0, this);
}
+
+void DeterministicDriver::go()
+{
+ // tick both queues until everyone is done
+ while (m_done_counter != m_num_procs) {
+ libruby_tick(1);
+ eventQueue->triggerEvents(eventQueue->getTime() + 1);
+ }
+}
+
+
DeterministicDriver::~DeterministicDriver()
{
for (int i=0; i<m_last_progress_vector.size(); i++) {
@@ -100,18 +112,27 @@ DeterministicDriver::~DeterministicDriver()
}
}
-void
-DeterministicDriver::hitCallback(Packet * pkt)
+//void DeterministicDriver::dmaHitCallback()
+//{
+// m_dma_generator->performCallback();
+//}
+
+void DeterministicDriver::wakeup() {
+ assert(0);
+ // this shouldn't be called as we are not scheduling the driver ever
+}
+
+void DeterministicDriver::hitCallback(int64_t request_id)
{
- NodeID proc = pkt->req->contextId();
- SubBlock data(Address(pkt->getAddr()), pkt->req->getSize());
- if (pkt->hasData()) {
- for (int i = 0; i < pkt->req->getSize(); i++) {
- data.setByte(i, *(pkt->getPtr<uint8>()+i));
- }
- }
- m_generator_vector[proc]->performCallback(proc, data);
- m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
+ ASSERT(requests.find(request_id) != requests.end());
+ int proc = requests[request_id].first;
+ Address address = requests[request_id].second;
+
+ m_generator_vector[proc]->performCallback(proc, address);
+
+ m_last_progress_vector[proc] = eventQueue->getTime();
+
+ requests.erase(request_id);
}
bool DeterministicDriver::isStoreReady(NodeID node)
@@ -121,6 +142,8 @@ bool DeterministicDriver::isStoreReady(NodeID node)
bool DeterministicDriver::isStoreReady(NodeID node, Address addr)
{
+ int addr_number = addr.getAddress()/DATA_BLOCK_BYTES;
+
return isAddrReady(node, m_store_vector, addr);
}
@@ -138,25 +161,26 @@ bool DeterministicDriver::isLoadReady(NodeID node, Address addr)
bool DeterministicDriver::isAddrReady(NodeID node, Vector<NodeID> addr_vector)
{
for (int i=0; i<addr_vector.size(); i++) {
- if (((addr_vector[i]+1)%RubyConfig::numberOfProcessors() == node) &&
+ if (((addr_vector[i]+1)%m_num_procs == node) &&
(m_loads_completed+m_stores_completed >= m_numCompletionsPerNode*node) && // is this node next
- (g_eventQueue_ptr->getTime() >= m_last_issue + 10)) { // controll rate of requests
+ (eventQueue->getTime() >= m_last_issue + 10)) { // control rate of requests
return true;
}
}
+
return false;
}
// test for a particular addr
bool DeterministicDriver::isAddrReady(NodeID node, Vector<NodeID> addr_vector, Address addr)
{
- int addr_number = addr.getAddress()/RubyConfig::dataBlockBytes();
+ int addr_number = addr.getAddress()/DATA_BLOCK_BYTES;
ASSERT ((addr_number >= 0) && (addr_number < addr_vector.size()));
- if (((addr_vector[addr_number]+1)%RubyConfig::numberOfProcessors() == node) &&
+ if (((addr_vector[addr_number]+1)%m_num_procs == node) &&
(m_loads_completed+m_stores_completed >= m_numCompletionsPerNode*node) && // is this node next
- (g_eventQueue_ptr->getTime() >= m_last_issue + 10)) { // controll rate of requests
+ (eventQueue->getTime() >= m_last_issue + 10)) { // control rate of requests
return true;
} else {
return false;
@@ -178,7 +202,7 @@ void DeterministicDriver::storeCompleted(NodeID node, Address addr)
void DeterministicDriver::setNextAddr(NodeID node, Address addr, Vector<NodeID>& addr_vector)
{
// mark the addr vector that this proc was the last to use the particular address
- int addr_number = addr.getAddress()/RubyConfig::dataBlockBytes();
+ int addr_number = addr.getAddress()/DATA_BLOCK_BYTES;
addr_vector[addr_number] = node;
}
@@ -204,17 +228,16 @@ Address DeterministicDriver::getNextAddr(NodeID node, Vector<NodeID> addr_vector
ASSERT(isAddrReady(node, addr_vector));
for (int addr_number=0; addr_number<addr_vector.size(); addr_number++) {
- //for (int addr_number=addr_vector.size()-1; addr_number>0; addr_number--) {
// is this node next in line for the addr
- if (((addr_vector[addr_number]+1)%RubyConfig::numberOfProcessors()) == node) {
+ if ((addr_vector[addr_number] != 1) && ((addr_vector[addr_number]+1)%m_num_procs) == node) {
// One addr per cache line
- addr.setAddress(addr_number * RubyConfig::dataBlockBytes());
+ addr.setAddress(addr_number * DATA_BLOCK_BYTES);
}
}
- m_last_issue = g_eventQueue_ptr->getTime();
+ m_last_issue = eventQueue->getTime();
return addr;
}
@@ -223,9 +246,9 @@ Address DeterministicDriver::getNextAddr(NodeID node, Vector<NodeID> addr_vector
void DeterministicDriver::reportDone()
{
m_done_counter++;
- if ((m_done_counter == RubyConfig::numberOfProcessors())) {
- //|| (m_done_counter == g_tester_length)) {
- m_finish_time = g_eventQueue_ptr->getTime();
+ if ((m_done_counter == m_num_procs)) {
+ m_finish_time = eventQueue->getTime();
+ //m_dma_generator->stop();
}
}
@@ -239,36 +262,6 @@ void DeterministicDriver::recordStoreLatency(Time time)
m_store_latency.add(time);
}
-void DeterministicDriver::wakeup()
-{
- // checkForDeadlock();
- if (m_done_counter < RubyConfig::numberOfProcessors()) {
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
- }
-}
-
-void DeterministicDriver::checkForDeadlock()
-{
- int size = m_last_progress_vector.size();
- Time current_time = g_eventQueue_ptr->getTime();
- for (int processor=0; processor<size; processor++) {
- if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
- WARN_EXPR(processor);
-#ifndef NDEBUG
- Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
-#endif
- assert(seq_ptr != NULL);
- // if (seq_ptr->isRequestPending()) {
- // WARN_EXPR(seq_ptr->pendingAddress());
- // }
- WARN_EXPR(current_time);
- WARN_EXPR(m_last_progress_vector[processor]);
- WARN_EXPR(current_time - m_last_progress_vector[processor]);
- ERROR_MSG("Deadlock detected.");
- }
- }
-}
-
void DeterministicDriver::printStats(ostream& out) const
{
out << endl;
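The driver hands each address around the nodes with a simple three-part test, visible in isAddrReady() above: it is node N's turn only if N is the successor of the last node to touch the address, only once the preceding nodes have reached their completion quota, and no sooner than 10 ticks after the last issue. The same test, pulled out as a standalone predicate (an illustrative restatement, not part of the patch):

// Standalone restatement of DeterministicDriver::isAddrReady()'s test; illustrative only.
// last_user is -1 when nobody has touched the address yet, so node 0 goes first.
bool is_addr_ready(int node, int last_user,
                   int completions, int completions_per_node,
                   long now, long last_issue, int num_procs)
{
    bool my_turn   = ((last_user + 1) % num_procs) == node;        // strict round robin over nodes
    bool unlocked  = completions >= completions_per_node * node;   // earlier nodes met their quota
    bool throttled = now < last_issue + 10;                        // at most one issue per 10 ticks
    return my_turn && unlocked && !throttled;
}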
diff --git a/src/mem/ruby/tester/DeterministicDriver.hh b/src/mem/ruby/tester/DeterministicDriver.hh
index 710da7922..288ad5a15 100644
--- a/src/mem/ruby/tester/DeterministicDriver.hh
+++ b/src/mem/ruby/tester/DeterministicDriver.hh
@@ -36,25 +36,35 @@
#ifndef DETERMINISTICDRIVER_H
#define DETERMINISTICDRIVER_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Driver.hh"
-#include "mem/ruby/common/Histogram.hh"
-#include "mem/protocol/CacheRequestType.hh"
-
-class RubySystem;
-class SpecifiedGenerator;
-class Packet;
-
-class DeterministicDriver : public Driver, public Consumer {
+#include <map>
+#include "mem/ruby/tester/Global_Tester.hh"
+#include "mem/ruby/common/Histogram.hh" // includes global, but doesn't use anything, so it should be fine
+#include "mem/protocol/CacheRequestType.hh" // includes global, but doesn't use anything, so it should be fine
+#include "Address_Tester.hh" // we redefined the address
+#include "mem/ruby/tester/DetermGETXGenerator.hh" // this is our file
+#include "mem/ruby/tester/DetermSeriesGETSGenerator.hh" // this is our file
+#include "mem/ruby/tester/DetermInvGenerator.hh" // this is our file
+#include "mem/ruby/libruby.hh"
+#include "mem/ruby/tester/Driver_Tester.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/tester/EventQueue_Tester.hh"
+#include "mem/protocol/SpecifiedGeneratorType.hh"
+
+//class DMAGenerator;
+
+class DeterministicDriver : public Driver_Tester, public Consumer {
public:
+ friend class DetermGETXGenerator;
+ friend class DetermSeriesGETSGenerator;
+ friend class DetermInvGenerator;
// Constructors
- DeterministicDriver(RubySystem* sys_ptr);
+ DeterministicDriver(string generator_type, int num_completions, int num_procs, Time g_think_time, Time g_wait_time, int g_tester_length);
// Destructor
~DeterministicDriver();
// Public Methods
+ void go();
bool isStoreReady(NodeID node);
bool isLoadReady(NodeID node);
bool isStoreReady(NodeID node, Address addr);
@@ -70,37 +80,47 @@ public:
void recordLoadLatency(Time time);
void recordStoreLatency(Time time);
- void hitCallback(Packet* pkt);
+// void dmaHitCallback();
+ void hitCallback(int64_t request_id);
void wakeup();
void printStats(ostream& out) const;
void clearStats() {}
void printConfig(ostream& out) const {}
void print(ostream& out) const;
+ // Public copy constructor and assignment operator
+ DeterministicDriver(const DeterministicDriver& obj);
+ DeterministicDriver& operator=(const DeterministicDriver& obj);
+
private:
// Private Methods
- void checkForDeadlock();
Address getNextAddr(NodeID node, Vector<NodeID> addr_vector);
bool isAddrReady(NodeID node, Vector<NodeID> addr_vector);
bool isAddrReady(NodeID node, Vector<NodeID> addr_vector, Address addr);
void setNextAddr(NodeID node, Address addr, Vector<NodeID>& addr_vector);
- // Private copy constructor and assignment operator
- DeterministicDriver(const DeterministicDriver& obj);
- DeterministicDriver& operator=(const DeterministicDriver& obj);
// Data Members (m_ prefix)
Vector<Time> m_last_progress_vector;
Vector<SpecifiedGenerator*> m_generator_vector;
+ //DMAGenerator* m_dma_generator;
Vector<NodeID> m_load_vector; // Processor last to load the addr
Vector<NodeID> m_store_vector; // Processor last to store the addr
+ int last_proc;
int m_done_counter;
int m_loads_completed;
int m_stores_completed;
// enforces the previous node to have a certain # of completions
// before next node starts
+
+ map <int64_t, pair <int, Address> > requests;
+ Time m_think_time;
+ Time m_wait_time;
+ int m_tester_length;
+ int m_num_procs;
+ RubyEventQueue * eventQueue;
int m_numCompletionsPerNode;
Histogram m_load_latency;
diff --git a/src/mem/ruby/slicc_interface/AbstractChip.cc b/src/mem/ruby/tester/Driver_Tester.cc
index 021c95b9d..d29e6f988 100644
--- a/src/mem/ruby/slicc_interface/AbstractChip.cc
+++ b/src/mem/ruby/tester/Driver_Tester.cc
@@ -30,18 +30,15 @@
/*
* $Id$
*
- * Description: See AbstractChip.hh
+ * Description: See Driver_Tester.hh
*
*/
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/tester/Driver_Tester.hh"
-AbstractChip::AbstractChip(NodeID id, Network* net_ptr) {
- m_id = id;
- m_net_ptr = net_ptr;
- m_L1Cache_sequencer_vec.setSize(0);
+Driver_Tester::Driver_Tester() {
}
// still need to be defined for subclasses
-AbstractChip::~AbstractChip() {
+Driver_Tester::~Driver_Tester() {
}
diff --git a/src/mem/ruby/tester/RequestGenerator.hh b/src/mem/ruby/tester/Driver_Tester.hh
index 2859eb436..77cd4ed3c 100644
--- a/src/mem/ruby/tester/RequestGenerator.hh
+++ b/src/mem/ruby/tester/Driver_Tester.hh
@@ -34,69 +34,49 @@
*
*/
-#ifndef REQUESTGENERATOR_H
-#define REQUESTGENERATOR_H
+#ifndef Driver_Tester_H
+#define Driver_Tester_H
-#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/common/Consumer.hh"
-#include "mem/protocol/RequestGeneratorStatus.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "mem/ruby/common/Address.hh"
+#include "Address_Tester.hh"
-class Sequencer;
-class SubBlock;
-class SyntheticDriver;
-
-class RequestGenerator : public Consumer {
+class Driver_Tester {
public:
// Constructors
- RequestGenerator(NodeID node, SyntheticDriver& driver);
+ Driver_Tester();
// Destructor
- ~RequestGenerator();
+ virtual ~Driver_Tester() = 0;
// Public Methods
- void wakeup();
- void performCallback(NodeID proc, SubBlock& data);
+ virtual void get_network_config() {}
+ virtual void dmaHitCallback() {};
+ virtual void hitCallback(int64_t id) = 0;
+ virtual void go() = 0;
+ virtual integer_t getInstructionCount(int procID) const { return 1; }
+ virtual integer_t getCycleCount(int procID) const { return 1; }
+ virtual void addThreadDependency(int procID, int requestor_thread, int conflict_thread) const { assert(0);}
+ virtual void printDebug(){}
- void print(ostream& out) const;
-private:
- // Private Methods
- int thinkTime() const;
- int waitTime() const;
- int holdTime() const;
- void initiateTest();
- void initiateSwap();
- void initiateRelease();
- void pickAddress();
- Sequencer* sequencer() const;
+ virtual void printStats(ostream& out) const = 0;
+ virtual void clearStats() = 0;
- // Private copy constructor and assignment operator
- RequestGenerator(const RequestGenerator& obj);
- RequestGenerator& operator=(const RequestGenerator& obj);
+ virtual void printConfig(ostream& out) const = 0;
- // Data Members (m_ prefix)
- SyntheticDriver& m_driver;
- NodeID m_node;
- RequestGeneratorStatus m_status;
- int m_counter;
- Time m_last_transition;
- Address m_address;
-};
+ virtual integer_t readPhysicalMemory(int procID, physical_address_t address,
+ int len ){ ASSERT(0); return 0; }
-// Output operator declaration
-ostream& operator<<(ostream& out, const RequestGenerator& obj);
+ virtual void writePhysicalMemory( int procID, physical_address_t address,
+ integer_t value, int len ){ ASSERT(0); }
-// ******************* Definitions *******************
+protected:
+ // accessible by subclasses
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const RequestGenerator& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
+private:
+ // inaccessible by subclasses
-#endif //REQUESTGENERATOR_H
+};
+#endif //Driver_Tester_H
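Driver_Tester is now a thin abstract interface: a concrete driver only has to supply hitCallback(), go(), and the three stats hooks; everything else has a default. A minimal subclass sketch, using only the virtuals declared above (the class name and body are illustrative, and assume the same includes Driver_Tester.hh already pulls in):

// Minimal Driver_Tester subclass; illustrative only.
class NullDriver : public Driver_Tester {
public:
    NullDriver() : m_hits(0) {}
    void hitCallback(int64_t request_id) { m_hits++; }       // a libruby request completed
    void go() { /* tick libruby and a tester event queue until done */ }
    void printStats(ostream& out) const { out << "hits: " << m_hits << endl; }
    void clearStats() { m_hits = 0; }
    void printConfig(ostream& out) const {}
private:
    int m_hits;
};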
diff --git a/src/mem/ruby/tester/EventQueue_Tester.hh b/src/mem/ruby/tester/EventQueue_Tester.hh
new file mode 100644
index 000000000..fe600bb84
--- /dev/null
+++ b/src/mem/ruby/tester/EventQueue_Tester.hh
@@ -0,0 +1,118 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: The RubyEventQueue class implements an event queue which
+ * can trigger events, allowing our simulation to be event driven.
+ *
+ * Currently, the only event we support is a Consumer being signaled
+ * by calling the consumer's wakeup() routine. Adding the event to
+ * the queue does not require a virtual function call, though calling
+ * wakeup() is a virtual function call.
+ *
+ * The method triggerEvents() is called with a global time. All
+ * events which are before or at this time are triggered in timestamp
+ * order. No ordering is enforced for events scheduled to occur at
+ * the same time. Events scheduled to wakeup the same consumer at the
+ * same time are combined into a single event.
+ *
+ * The method scheduleEventAbsolute() is called with a global time
+ * and a consumer pointer. The event queue will call the wakeup()
+ * method of the consumer at the appropriate time.
+ *
+ * This implementation of RubyEventQueue uses a dynamically sized array
+ * managed as a heap. The algorithm used has O(lg n) for insert and
+ * O(lg n) for extract minimum element. (Based on chapter 7 of Cormen,
+ * Leiserson, and Rivest.) The array is dynamically sized and is
+ * automatically doubled in size when necessary.
+ *
+ */
+
+#ifndef EVENTQUEUE_H
+#define EVENTQUEUE_H
+
+#include "mem/ruby/tester/Global_Tester.hh"
+#include "mem/gems_common/Vector.hh"
+
+class Consumer;
+template <class TYPE> class PrioHeap;
+class RubyEventQueueNode;
+
+class RubyEventQueue {
+public:
+ // Constructors
+ RubyEventQueue();
+
+ // Destructor
+ ~RubyEventQueue();
+
+ // Public Methods
+
+ Time getTime() const { return m_globalTime; }
+ void scheduleEvent(Consumer* consumer, Time timeDelta) { scheduleEventAbsolute(consumer, timeDelta + m_globalTime); }
+ void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
+ void triggerEvents(Time t); // called to handle all events <= time t
+ void triggerAllEvents();
+ void print(ostream& out) const;
+ bool isEmpty() const;
+
+ Time getTimeOfLastRecovery() {return m_timeOfLastRecovery;}
+ void setTimeOfLastRecovery(Time t) {m_timeOfLastRecovery = t;}
+
+ // Private Methods
+private:
+ // Private copy constructor and assignment operator
+ void init();
+ RubyEventQueue(const RubyEventQueue& obj);
+ RubyEventQueue& operator=(const RubyEventQueue& obj);
+
+ // Data Members (m_ prefix)
+ PrioHeap<RubyEventQueueNode>* m_prio_heap_ptr;
+ Time m_globalTime;
+ Time m_timeOfLastRecovery;
+};
+
+// Output operator declaration
+inline extern
+ostream& operator<<(ostream& out, const RubyEventQueue& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+inline extern
+ostream& operator<<(ostream& out, const RubyEventQueue& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //EVENTQUEUE_H
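The comment block above fully specifies the contract: events are (time, consumer) pairs kept in a min-heap, triggerEvents(t) wakes every consumer whose timestamp is at or before t in timestamp order, and scheduleEvent() is just scheduleEventAbsolute() shifted by the current time. A freestanding sketch of that contract using std::priority_queue (an illustration of the described behavior, not the PrioHeap-based implementation the tester links against):

// Illustrative min-heap event queue matching the contract described above.
#include <queue>
#include <vector>

struct Consumer { virtual void wakeup() = 0; virtual ~Consumer() {} };

class TinyEventQueue {
    struct Event { long time; Consumer* consumer; };
    struct Later { bool operator()(const Event& a, const Event& b) const { return a.time > b.time; } };
public:
    TinyEventQueue() : m_time(0) {}
    long getTime() const { return m_time; }
    void scheduleEvent(Consumer* c, long delta) { scheduleEventAbsolute(c, m_time + delta); }
    void scheduleEventAbsolute(Consumer* c, long t) { Event e = {t, c}; m_heap.push(e); } // O(lg n) insert
    // Wake every consumer whose timestamp is <= t, in timestamp order.
    void triggerEvents(long t) {
        while (!m_heap.empty() && m_heap.top().time <= t) {
            Event e = m_heap.top(); m_heap.pop();      // O(lg n) extract-min
            m_time = e.time;                           // advance global time to the event
            e.consumer->wakeup();
        }
        m_time = t;
    }
    bool isEmpty() const { return m_heap.empty(); }
private:
    std::priority_queue<Event, std::vector<Event>, Later> m_heap;
    long m_time;
};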
diff --git a/src/mem/ruby/tester/Global_Tester.hh b/src/mem/ruby/tester/Global_Tester.hh
new file mode 100644
index 000000000..9d622bbf6
--- /dev/null
+++ b/src/mem/ruby/tester/Global_Tester.hh
@@ -0,0 +1,74 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * */
+
+#ifndef GLOBAL_H
+#define GLOBAL_H
+
+typedef unsigned char uint8;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+
+typedef signed char int8;
+typedef int int32;
+typedef long long int64;
+
+typedef long long integer_t;
+typedef unsigned long long uinteger_t;
+
+typedef int64 Time;
+typedef uint64 physical_address_t;
+typedef uint64 la_t;
+typedef uint64 pa_t;
+typedef integer_t simtime_t;
+typedef int NodeID;
+
+#include "mem/ruby/common/TypeDefines.hh"
+#include "mem/gems_common/std-includes.hh"
+#include "Debug_Tester.hh"
+
+// simple type declarations
+typedef Time LogicalTime;
+typedef int64 Index; // what the address bit ripper returns
+typedef int word; // one word of a cache line
+typedef unsigned int uint;
+typedef int SwitchID;
+typedef int LinkID;
+
+
+class Debug;
+extern Debug * debug_ptr;
+class RubyEventQueue;
+extern RubyEventQueue * eventQueue;
+#endif //GLOBAL_H
+
diff --git a/src/mem/ruby/tester/Instruction.cc b/src/mem/ruby/tester/Instruction.cc
deleted file mode 100644
index 1f4d56fc2..000000000
--- a/src/mem/ruby/tester/Instruction.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
- * Multifacet Project. ALL RIGHTS RESERVED.
- *
- * ##HEADER##
- *
- * This software is furnished under a license and may be used and
- * copied only in accordance with the terms of such license and the
- * inclusion of the above copyright notice. This software or any
- * other copies thereof or any derivative works may not be provided or
- * otherwise made available to any other persons. Title to and
- * ownership of the software is retained by Mark Hill and David Wood.
- * Any use of this software must include the above copyright notice.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
- * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
- * */
-
-/*
- * $Id: Instruction.C 1.2 05/08/26 00:54:48-05:00 xu@s0-32.cs.wisc.edu $
- *
- * Description:
- *
- */
-
-#include "mem/ruby/tester/Instruction.hh"
-
-Instruction::Instruction(){
- m_opcode = Opcode_NUM_OPCODES;
- m_address = Address(physical_address_t(0));
-}
-
-Instruction::Instruction(Opcode op, Address addr){
- m_opcode = op;
- m_address = addr;
- assert(addr.getAddress() == 0);
-}
-
-void Instruction::init(Opcode op, Address addr){
- m_opcode = op;
- m_address = addr;
- //cout << "Instruction(" << op << ", " << m_address << ")" << endl;
-}
-
-Opcode Instruction::getOpcode(){
- return m_opcode;
-}
-
-Address Instruction::getAddress(){
- return m_address;
-}
diff --git a/src/mem/ruby/tester/Instruction.hh b/src/mem/ruby/tester/Instruction.hh
deleted file mode 100644
index 35791dcba..000000000
--- a/src/mem/ruby/tester/Instruction.hh
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
- * Multifacet Project. ALL RIGHTS RESERVED.
- *
- * ##HEADER##
- *
- * This software is furnished under a license and may be used and
- * copied only in accordance with the terms of such license and the
- * inclusion of the above copyright notice. This software or any
- * other copies thereof or any derivative works may not be provided or
- * otherwise made available to any other persons. Title to and
- * ownership of the software is retained by Mark Hill and David Wood.
- * Any use of this software must include the above copyright notice.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
- * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
- * */
-
-/*
- * $Id: Instruction.hh 1.2 05/05/24 12:15:47-05:00 kmoore@balder.cs.wisc.edu $
- *
- * Description:
- *
- */
-
-#ifndef INSTRUCTION_H
-#define INSTRUCTION_H
-
-#include "mem/ruby/common/Address.hh"
-
-
-enum Opcode {
- Opcode_BEGIN,
- Opcode_LD,
- Opcode_ST,
- Opcode_INC,
- Opcode_COMMIT,
- Opcode_DONE,
- Opcode_NUM_OPCODES
-};
-
-class Instruction {
- public:
- Instruction();
- Instruction(Opcode op, Address addr);
-
- void init(Opcode op, Address addr);
- Opcode getOpcode();
- Address getAddress();
-
- private:
- Opcode m_opcode;
- Address m_address;
-
-};
-
-#endif
diff --git a/src/mem/ruby/tester/RaceyDriver.cc b/src/mem/ruby/tester/RaceyDriver.cc
index c56557645..dfecfa796 100644
--- a/src/mem/ruby/tester/RaceyDriver.cc
+++ b/src/mem/ruby/tester/RaceyDriver.cc
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,36 +32,31 @@
*
*/
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/System.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/tester/RaceyDriver.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "RaceyPseudoThread.hh"
-#include "mem/ruby/common/SubBlock.hh"
+#include "mem/ruby/tester/EventQueue_Tester.hh"
+#include "mem/ruby/tester/RaceyPseudoThread.hh"
-RaceyDriver::RaceyDriver()
+RaceyDriver::RaceyDriver(int num_procs, int tester_length)
{
- // debug transition?
- if(false) {
- assert(g_debug_ptr);
- g_debug_ptr->setDebugTime(1);
- }
-
m_finish_time = 0;
m_done_counter = 0;
m_wakeup_thread0 = false;
+ m_num_procs = num_procs;
+ m_tester_length = tester_length;
+ eventQueue = new RubyEventQueue;
// racey at least need two processors
- assert(RubyConfig::numberOfProcessors() >= 2);
+ assert(m_num_procs >= 2);
// init all racey pseudo threads
- m_racey_pseudo_threads.setSize(RubyConfig::numberOfProcessors());
+ m_racey_pseudo_threads.setSize(m_num_procs);
for (int i=0; i<m_racey_pseudo_threads.size(); i++) {
m_racey_pseudo_threads[i] = new RaceyPseudoThread(i, *this);
}
// add this driver to the global event queue, for deadlock detection
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ eventQueue->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
}
RaceyDriver::~RaceyDriver()
@@ -71,20 +66,38 @@ RaceyDriver::~RaceyDriver()
}
}
-void RaceyDriver::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
+void RaceyDriver::go() {
+ // tick both queues until everyone is done
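+  // libruby_tick(1) advances the Ruby memory system by one cycle;
+  // triggerEvents runs any tester-side events scheduled up to the new time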
+ while (m_done_counter != m_num_procs) {
+ libruby_tick(1);
+ eventQueue->triggerEvents(eventQueue->getTime() + 1);
+ }
+}
+
+
+void RaceyDriver::hitCallback(int64_t request_id)
{
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
- m_racey_pseudo_threads[proc]->performCallback(proc, data);
+ ASSERT(requests.find(request_id) != requests.end());
+ int proc = requests[request_id].first;
+ Address address = requests[request_id].second.address;
+ uint8_t * data = new uint8_t[4];
+ for (int i = 0; i < 4; i++) {
+ data[i] = requests[request_id].second.data[i];
+ }
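+  // the pseudo thread gets its own copy of the reply data, so the buffer
+  // allocated when the request was issued can be released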
+  delete [] requests[request_id].second.data;
+ m_racey_pseudo_threads[proc]->performCallback(proc, address, data);
+ requests.erase(request_id);
}
integer_t RaceyDriver::getInstructionCount(int procID) const
{
- return m_racey_pseudo_threads[procID]->getInstructionCounter();
+ // return m_racey_pseudo_threads[procID]->getInstructionCounter();
+  assert(0);
+  return 0; // not reached
}
int RaceyDriver::runningThreads()
{
- return RubyConfig::numberOfProcessors() - m_done_counter;
+ return m_num_procs - m_done_counter;
}
// used to wake up thread 0 whenever other thread finishes
@@ -96,12 +109,12 @@ void RaceyDriver::registerThread0Wakeup()
void RaceyDriver::joinThread()
{
m_done_counter++;
- if (m_done_counter == RubyConfig::numberOfProcessors()) {
- m_finish_time = g_eventQueue_ptr->getTime();
+ if (m_done_counter == m_num_procs) {
+ m_finish_time = eventQueue->getTime();
}
if(m_wakeup_thread0) {
- g_eventQueue_ptr->scheduleEvent(m_racey_pseudo_threads[0], 1);
+ eventQueue->scheduleEvent(m_racey_pseudo_threads[0], 1);
m_wakeup_thread0 = false;
}
}
@@ -114,14 +127,14 @@ void RaceyDriver::wakeup()
}
// schedule next wakeup
- if (m_done_counter < RubyConfig::numberOfProcessors()) {
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ if (m_done_counter < m_num_procs) {
+ eventQueue->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
}
}
void RaceyDriver::printStats(ostream& out) const
{
- assert(m_done_counter == RubyConfig::numberOfProcessors());
+ assert(m_done_counter == m_num_procs);
out << endl;
out << "RaceyDriver Stats" << endl;
out << "---------------------" << endl;
diff --git a/src/mem/ruby/tester/RaceyDriver.hh b/src/mem/ruby/tester/RaceyDriver.hh
index a3e35b47c..cc2130ef9 100644
--- a/src/mem/ruby/tester/RaceyDriver.hh
+++ b/src/mem/ruby/tester/RaceyDriver.hh
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,15 +38,25 @@
#ifndef RACEYDRIVER_H
#define RACEYDRIVER_H
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Driver.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
+#include "mem/ruby/tester/Driver_Tester.hh"
+#include "mem/ruby/tester/RaceyPseudoThread.hh"
+#include <map>
+#include "mem/ruby/libruby.hh"
-class RaceyPseudoThread;
+#define g_DEADLOCK_THRESHOLD 5000
-class RaceyDriver : public Driver, public Consumer {
+
+struct address_data {
+ Address address;
+ uint8_t * data;
+};
+
+class RaceyDriver : public Driver_Tester, public Consumer {
public:
+ friend class RaceyPseudoThread;
// Constructors
- RaceyDriver();
+ RaceyDriver(int num_procs, int tester_length);
// Destructor
~RaceyDriver();
@@ -59,11 +69,12 @@ public:
return m_racey_pseudo_threads[0]->getInitializedState();
};
- void hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
+ void hitCallback(int64_t request_id);
void wakeup();
void printStats(ostream& out) const;
void clearStats() {}
void printConfig(ostream& out) const {}
+ void go();
integer_t getInstructionCount(int procID) const;
@@ -81,6 +92,7 @@ public:
}
void print(ostream& out) const;
+
private:
// Private copy constructor and assignment operator
@@ -91,8 +103,11 @@ private:
Vector<RaceyPseudoThread*> m_racey_pseudo_threads;
int m_done_counter;
bool m_wakeup_thread0;
-
Time m_finish_time;
+ map <int64_t, pair <int, struct address_data> > requests;
+ RubyEventQueue * eventQueue;
+ int m_num_procs;
+ int m_tester_length;
};
// Output operator declaration
@@ -110,3 +125,4 @@ ostream& operator<<(ostream& out, const RaceyDriver& obj)
}
#endif //RACEYDRIVER_H
+
diff --git a/src/mem/ruby/tester/RaceyPseudoThread.cc b/src/mem/ruby/tester/RaceyPseudoThread.cc
new file mode 100644
index 000000000..e5e1c1169
--- /dev/null
+++ b/src/mem/ruby/tester/RaceyPseudoThread.cc
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 1999 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Description: see RaceyPseudoThread.hh
+ */
+
+#include "mem/ruby/tester/RaceyPseudoThread.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/tester/RaceyDriver.hh"
+#include "gzstream.hh"
+
+RaceyPseudoThread::RaceyPseudoThread(NodeID id, RaceyDriver& driver)
+ : m_driver(driver), m_proc_id(id) {
+
+ resetIC(); // IC contains the committed instruction number
+ m_last_progress = 0;
+ m_done = false;
+ m_stop = 0;
+ m_driver.eventQueue->scheduleEvent(this, 1);
+}
+
+RaceyPseudoThread::~RaceyPseudoThread() {
+}
+
+void RaceyPseudoThread::checkForDeadlock() {
+ Time current_time = m_driver.eventQueue->getTime();
+ if(!m_done && (current_time - m_last_progress) > g_DEADLOCK_THRESHOLD) {
+ WARN_EXPR(m_proc_id);
+ WARN_EXPR(m_ic_counter);
+ WARN_EXPR(m_last_progress);
+ ERROR_MSG("Deadlock detected.");
+ }
+}
+
+void RaceyPseudoThread::performCallback(int proc, Address address, uint8_t * data ) {
+ assert(proc == m_proc_id);
+
+ DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
+ DEBUG_EXPR(TESTER_COMP, LowPrio, address);
+
+
+ m_last_progress = m_driver.eventQueue->getTime();
+
+ if(m_read) {
+ int b0, b1, b2, b3;
+ b0 = data[0]; b0 <<= 0;
+ b1 = data[1]; b1 <<= 8;
+ b2 = data[2]; b2 <<= 16;
+ b3 = data[3]; b3 <<= 24;
+ m_value = b0 | b1 | b2 | b3;
+ } else {
+ char b0, b1, b2, b3;
+ b0 = (m_value>>0)&0xFF; data[0] = b0;
+ b1 = (m_value>>8)&0xFF; data[1] = b1;
+ b2 = (m_value>>16)&0xFF; data[2] = b2;
+ b3 = (m_value>>24)&0xFF; data[3] = b3;
+ }
+
+  // schedule a wakeup to issue the next request in the next cycle
+ m_driver.eventQueue->scheduleEvent(this, 1);
+
+ // new instruction
+ m_ic_counter++;
+
+}
+
+void RaceyPseudoThread::wakeup() {
+ // for debug
+ if(m_stop != 0) {
+ cout << m_proc_id << " " << m_stop << ((m_read)? " read ":" written ") << m_value;
+ if(0) cout << " [" << m_driver.eventQueue->getTime() << "]";
+ cout << endl;
+ }
+
+ assert(!m_done);
+
+  // Note: this function cannot have ANY local variables!
+
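+  // wakeup() is written as a stackless coroutine: all state lives in member
+  // variables, m_stop records the resume point, each memory access returns
+  // after issuing its request, and the switch below jumps back to the
+  // matching label once the reply callback reschedules this thread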
+ switch(m_stop) {
+ case 0:
+ break;
+ case 1:
+ goto L1;
+ case 2:
+ goto L2;
+ case 3:
+ goto L3;
+ case 4:
+ goto L4;
+ case 5:
+ goto L5;
+ case 6:
+ goto L6;
+ case 7:
+ goto L7;
+ case 8:
+ goto L8;
+ case 9:
+ goto L9;
+ case 10:
+ goto L10;
+ default:
+ WARN_EXPR(m_stop);
+ ERROR_MSG("RaceyPseudoThread: Bad context point!");
+ }
+
+ //
+ // initialization
+ //
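+  // processor 0 seeds the sig and m arrays; every other processor spins on
+  // Thread0Initialized() until the seeding is finished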
+ if(m_proc_id == 0) {
+ for(m_looper = 0; m_looper < m_driver.m_num_procs; m_looper++) {
+ store_sig(m_looper, m_looper+1);
+ m_stop = 6; return;
+L6: {};
+ }
+ for(m_looper = 0; m_looper < M_ELEM; m_looper++) {
+ store_m(m_looper, M_ELEM-m_looper);
+ m_stop = 7; return;
+L7: {};
+ }
+
+ // init done
+ m_initialized = true;
+ } else {
+ // other processors
+ if(!m_driver.Thread0Initialized()) {
+      // wait for processor 0
+ m_driver.eventQueue->scheduleEvent(this, 1);
+ return;
+ }
+ }
+
+ cout << "Thread " << m_proc_id << " started in parallel phase" << endl;
+
+ //
+ // main thread body
+ //
+ for(m_looper = 0 ; m_looper < m_driver.m_tester_length; m_looper++) {
+ /* m_value = */ load_sig(m_proc_id);
+ m_stop = 1; return;
+L1: {};
+ m_num = m_value;
+ m_index1 = m_num%M_ELEM;
+ /* m_value = */ load_m(m_index1);
+ m_stop = 2; return;
+L2: {};
+ m_num = mix(m_num, m_value);
+ m_index2 = m_num%M_ELEM;
+ /* m_value = */ load_m(m_index2);
+ m_stop = 3; return;
+L3: {};
+ m_num = mix(m_num, m_value);
+ store_m(m_index2, m_num);
+ m_stop = 4; return;
+L4: {};
+ store_sig(m_proc_id, m_num);
+ m_stop = 5; return;
+L5: {};
+ } // end for
+
+ //
+ // compute final sig
+ //
+ if(m_proc_id == 0) {
+ // wait for other threads
+ while (m_driver.runningThreads() > 1) {
+ m_driver.registerThread0Wakeup();
+ m_stop = 10; return;
+L10: {};
+ }
+
+ /* m_value = */ load_sig(0);
+ m_stop = 8; return;
+L8: {};
+ m_final_sig = m_value;
+ for(m_looper = 1; m_looper < m_driver.m_num_procs; m_looper++) {
+ /* m_value = */ load_sig(m_looper);
+ m_stop = 9; return;
+L9: {};
+ m_final_sig = mix(m_value, m_final_sig);
+ }
+  } // processor 0
+
+ // done
+ m_driver.joinThread();
+ m_done = true;
+}
+
+void RaceyPseudoThread::load_sig(unsigned index) {
+ cout << m_proc_id << " : load_sig " << index << endl;
+
+ m_read = true;
+  // timestamp, thread id, action, and logical address are only used by transactional memory and should be filled in if TM support is added
+ uint8_t * read_data = new uint8_t[4];
+
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_proc_id);
+
+ // pc is zero, problem?
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(sig(index), read_data, 4, 0, RubyRequestType_LD, RubyAccessMode_User));
+
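+  // remember the outstanding request so the driver's hitCallback can route
+  // the reply back to this processor and its read buffer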
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+
+ struct address_data request_data;
+ request_data.address = Address(sig(index));
+ request_data.data = read_data;
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_proc_id, request_data)));
+
+ /*sequencer()->makeRequest(CacheMsg(Address(sig(index)), Address(sig(index)), CacheRequestType_LD,
+ Address(physical_address_t(1)),
+ AccessModeType_UserMode, 4,
+ PrefetchBit_No, 0, Address(0),
+ 0, 0 , false)); */
+}
+
+void RaceyPseudoThread::load_m(unsigned index) {
+ // cout << m_proc_id << " : load_m " << index << endl;
+
+ m_read = true;
+ uint8_t * read_data = new uint8_t[4];
+
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_proc_id);
+
+ // pc is zero, problem?
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(m(index), read_data, 4, 0, RubyRequestType_LD, RubyAccessMode_User));
+
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+
+ struct address_data request_data;
+ request_data.address = Address(m(index));
+ request_data.data = read_data;
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_proc_id, request_data)));
+
+ /*sequencer()->makeRequest(CacheMsg(Address(m(index)), Address(m(index)), CacheRequestType_LD,
+ Address(physical_address_t(1)),
+ AccessModeType_UserMode, 4,
+ PrefetchBit_No, 0, Address(0),
+ 0, 0, false)); */
+}
+
+void RaceyPseudoThread::store_sig(unsigned index, unsigned value) {
+ cout << m_proc_id << " : store_sig " << index << " " << value << endl;
+
+ m_read = false;
+ m_value = value;
+ uint8_t * write_data = new uint8_t[4];
+
+ memcpy(write_data, &value, 4);
+
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_proc_id);
+
+ // pc is zero, problem?
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(sig(index), write_data, 4, 0, RubyRequestType_ST, RubyAccessMode_User));
+
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+
+ struct address_data request_data;
+ request_data.address = Address(sig(index));
+ request_data.data = write_data;
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_proc_id, request_data)));
+
+ /*sequencer()->makeRequest(CacheMsg(Address(sig(index)), Address(sig(index)), CacheRequestType_ST,
+ Address(physical_address_t(1)),
+ AccessModeType_UserMode, 4,
+ PrefetchBit_No, 0, Address(0),
+ 0, 0, false)); */
+}
+
+void RaceyPseudoThread::store_m(unsigned index, unsigned value) {
+ //cout << m_proc_id << " : store_m " << index << endl;
+
+ m_read = false;
+ m_value = value;
+ uint8_t * write_data = new uint8_t[4];
+ memcpy(write_data, &value, 4);
+
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, m_proc_id);
+
+ // pc is zero, problem?
+ int64_t request_id = libruby_issue_request(libruby_get_port_by_name(port_name), RubyRequest(m(index), write_data, 4, 0, RubyRequestType_ST, RubyAccessMode_User));
+
+ ASSERT(m_driver.requests.find(request_id) == m_driver.requests.end());
+
+ struct address_data request_data;
+ request_data.address = Address(m(index));
+ request_data.data = write_data;
+ m_driver.requests.insert(make_pair(request_id, make_pair(m_proc_id, request_data)));
+
+ /*sequencer()->makeRequest(CacheMsg(Address(m(index)), Address(m(index)), CacheRequestType_ST,
+ Address(physical_address_t(1)),
+ AccessModeType_UserMode, 4,
+ PrefetchBit_No, 0, Address(0),
+ 0, 0, false)); */
+}
+
+// Save and Load context of a thread
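+// Since wakeup() keeps no locals, saving the loop counter, working values,
+// indices, and the resume point m_stop is enough to restore a thread.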
+void RaceyPseudoThread::saveCPUStates(string filename) {
+ ogzstream out(filename.c_str());
+ out.write((char*)&m_looper, sizeof(int));
+ out.write((char*)&m_num, sizeof(unsigned));
+ out.write((char*)&m_index1, sizeof(unsigned));
+ out.write((char*)&m_index2, sizeof(unsigned));
+ out.write((char*)&m_stop, sizeof(unsigned));
+ out.close();
+}
+
+void RaceyPseudoThread::loadCPUStates(string filename) {
+ igzstream out(filename.c_str());
+ out.read((char*)&m_looper, sizeof(int));
+ out.read((char*)&m_num, sizeof(unsigned));
+ out.read((char*)&m_index1, sizeof(unsigned));
+ out.read((char*)&m_index2, sizeof(unsigned));
+ out.read((char*)&m_stop, sizeof(unsigned));
+ out.close();
+}
+
+void RaceyPseudoThread::print(ostream& out) const {
+ out << "[Racey Pseudo Thread: " << m_proc_id << "]" << endl;
+}
+
diff --git a/src/mem/ruby/tester/RaceyPseudoThread.hh b/src/mem/ruby/tester/RaceyPseudoThread.hh
new file mode 100644
index 000000000..9db4ad04a
--- /dev/null
+++ b/src/mem/ruby/tester/RaceyPseudoThread.hh
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 1999 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Description: This implements a pseudo racey thread which drives the Ruby
+ *              timing simulator with accesses to two shared arrays.
+ *
+ */
+
+#ifndef RACEYPSEUDOTHREAD_H
+#define RACEYPSEUDOTHREAD_H
+
+#include "mem/ruby/tester/Global_Tester.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/system/NodeID.hh"
+#include "Address_Tester.hh"
+#include "mem/ruby/libruby.hh"
+
+class RaceyDriver;
+
+class RaceyPseudoThread : public Consumer {
+private:
+ // constants
+ static const int PRIME1 = 103072243;
+ static const int PRIME2 = 103995407;
+ static const int M_ELEM = 64;
+
+  // starting addresses of the m and sig arrays;
+  // each signature should occupy its own cache line
+ static const int SIG_ARR = 0;
+ static const int M_ARR = 0x10000;
+ static const int LINESIZE = 64;
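+  // consecutive indices of either array are spaced LINESIZE bytes apart,
+  // so each element falls on its own cache line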
+
+  // get the address of an element from the m or sig array
+ physical_address_t sig(unsigned index) {
+ assert(index < M_ARR/64);
+ return SIG_ARR + (index*64);
+ };
+ physical_address_t m(unsigned index) { return M_ARR + (index*64); };
+
+public:
+ // Constructors
+ RaceyPseudoThread(NodeID node, RaceyDriver& driver);
+
+ // Destructor
+ ~RaceyPseudoThread();
+
+ // Public Methods
+ void performCallback(int proc, Address address, uint8_t * data);
+
+ void wakeup();
+
+ integer_t getInstructionCount() { return m_ic_counter; };
+
+ unsigned getSignature() { assert(m_proc_id == 0); return m_final_sig; };
+
+ void checkForDeadlock();
+
+ // save and restore the thread state
+ void saveCPUStates(string filename);
+ void loadCPUStates(string filename);
+
+ // reset IC to zero for next checkpoint
+ void resetIC() { m_ic_counter = 0; };
+
+ bool getInitializedState() { return m_initialized; };
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // mix two numbers
+ unsigned mix (unsigned i, unsigned j) { return (i + j * PRIME2) % PRIME1; };
+
+ // load or store the array
+ void load_sig(unsigned index);
+ void load_m(unsigned index);
+ void store_sig(unsigned index, unsigned value);
+ void store_m(unsigned index, unsigned value);
+
+ // Private copy constructor and assignment operator
+ RaceyPseudoThread(const RaceyPseudoThread& obj);
+ RaceyPseudoThread& operator=(const RaceyPseudoThread& obj);
+
+ // Data Members (m_ prefix)
+ RaceyDriver& m_driver;
+ NodeID m_proc_id;
+
+ // are we done?
+ bool m_done;
+
+ // [committed] instruction counter
+ int m_ic_counter;
+
+ // last time we made progress
+ Time m_last_progress;
+
+ // value of the callback block
+ bool m_read;
+ unsigned m_value;
+
+ // final signature
+ unsigned m_final_sig;
+
+ // local variables for the pseudo thread
+ int m_looper;
+ unsigned m_num, m_index1, m_index2, m_stop;
+ bool m_initialized;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const RaceyPseudoThread& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const RaceyPseudoThread& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //RACEYPSEUDOTHREAD_H
+
diff --git a/src/mem/ruby/tester/RequestGenerator.cc b/src/mem/ruby/tester/RequestGenerator.cc
deleted file mode 100644
index 4ee24544f..000000000
--- a/src/mem/ruby/tester/RequestGenerator.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/tester/RequestGenerator.hh"
-#include "mem/protocol/RequestGeneratorStatus.hh"
-#include "mem/protocol/LockStatus.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/ruby/tester/SyntheticDriver.hh"
-#include "mem/protocol/Chip.hh"
-
-RequestGenerator::RequestGenerator(NodeID node, SyntheticDriver& driver) :
- m_driver(driver)
-{
- m_status = RequestGeneratorStatus_Thinking;
- m_last_transition = 0;
- m_node = node;
- pickAddress();
- m_counter = 0;
-
- //g_eventQueue_ptr->scheduleEvent(this, 1+(random() % 200));
-}
-
-RequestGenerator::~RequestGenerator()
-{
-}
-
-void RequestGenerator::wakeup()
-{
- DEBUG_EXPR(TESTER_COMP, MedPrio, m_node);
- DEBUG_EXPR(TESTER_COMP, MedPrio, m_status);
-
- if (m_status == RequestGeneratorStatus_Thinking) {
- m_status = RequestGeneratorStatus_Test_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateTest(); // Test
- } else if (m_status == RequestGeneratorStatus_Holding) {
- m_status = RequestGeneratorStatus_Release_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateRelease(); // Release
- } else if (m_status == RequestGeneratorStatus_Before_Swap) {
- m_status = RequestGeneratorStatus_Swap_Pending;
- m_last_transition = g_eventQueue_ptr->getTime();
- initiateSwap();
- } else {
- WARN_EXPR(m_status);
- ERROR_MSG("Invalid status");
- }
-}
-
-void RequestGenerator::performCallback(NodeID proc, SubBlock& data)
-{
- Address address = data.getAddress();
- assert(proc == m_node);
- assert(address == m_address);
-
- DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
- DEBUG_EXPR(TESTER_COMP, LowPrio, m_status);
- DEBUG_EXPR(TESTER_COMP, LowPrio, address);
- DEBUG_EXPR(TESTER_COMP, LowPrio, data);
-
- if (m_status == RequestGeneratorStatus_Test_Pending) {
- // m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- if (data.readByte() == LockStatus_Locked) {
- // Locked - keep spinning
- m_status = RequestGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- } else {
- // Unlocked - try the swap
- m_driver.recordTestLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- m_status = RequestGeneratorStatus_Before_Swap;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- }
- } else if (m_status == RequestGeneratorStatus_Swap_Pending) {
- m_driver.recordSwapLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- if (data.readByte() == LockStatus_Locked) {
- // We failed to aquire the lock
- m_status = RequestGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- g_eventQueue_ptr->scheduleEvent(this, waitTime());
- } else {
- // We acquired the lock
- data.writeByte(LockStatus_Locked);
- m_status = RequestGeneratorStatus_Holding;
- m_last_transition = g_eventQueue_ptr->getTime();
- DEBUG_MSG(TESTER_COMP, HighPrio, "Acquired");
- DEBUG_EXPR(TESTER_COMP, HighPrio, proc);
- DEBUG_EXPR(TESTER_COMP, HighPrio, g_eventQueue_ptr->getTime());
- g_eventQueue_ptr->scheduleEvent(this, holdTime());
- }
- } else if (m_status == RequestGeneratorStatus_Release_Pending) {
- m_driver.recordReleaseLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- // We're releasing the lock
- data.writeByte(LockStatus_Unlocked);
-
- m_counter++;
- if (m_counter < g_tester_length) {
- m_status = RequestGeneratorStatus_Thinking;
- m_last_transition = g_eventQueue_ptr->getTime();
- pickAddress();
- g_eventQueue_ptr->scheduleEvent(this, thinkTime());
- } else {
- m_driver.reportDone();
- m_status = RequestGeneratorStatus_Done;
- m_last_transition = g_eventQueue_ptr->getTime();
- }
- } else {
- WARN_EXPR(m_status);
- ERROR_MSG("Invalid status");
- }
-}
-
-int RequestGenerator::thinkTime() const
-{
- return g_think_time;
-}
-
-int RequestGenerator::waitTime() const
-{
- return g_wait_time;
-}
-
-int RequestGenerator::holdTime() const
-{
- return g_hold_time;
-}
-
-void RequestGenerator::pickAddress()
-{
- assert(m_status == RequestGeneratorStatus_Thinking);
- m_address = m_driver.pickAddress(m_node);
-}
-
-void RequestGenerator::initiateTest()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Test");
-
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 1, 0, 0);
- MemCmd::Command command;
- command = MemCmd::ReadReq;
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
-
- sequencer()->makeRequest(&pkt);
-}
-
-void RequestGenerator::initiateSwap()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Swap");
-
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 2, 0, 0);
- MemCmd::Command command;
- command = MemCmd::SwapReq;
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
-
- sequencer()->makeRequest(&pkt);
-}
-
-void RequestGenerator::initiateRelease()
-{
- DEBUG_MSG(TESTER_COMP, MedPrio, "initiating Release");
-
- Addr data_addr = m_address.getAddress();
- Request request(0, data_addr, 1, Flags<unsigned int>(), 3, 0, 0);
- MemCmd::Command command;
- command = MemCmd::WriteReq;
-
- Packet pkt(&request, command, 0); // TODO -- make dest a real NodeID
-
- sequencer()->makeRequest(&pkt);
-}
-
-Sequencer* RequestGenerator::sequencer() const
-{
- return g_system_ptr->getChip(m_node/RubyConfig::numberOfProcsPerChip())->getSequencer(m_node%RubyConfig::numberOfProcsPerChip());
-}
-
-void RequestGenerator::print(ostream& out) const
-{
- out << "[RequestGenerator]" << endl;
-}
-
diff --git a/src/mem/ruby/tester/SpecifiedGenerator.cc b/src/mem/ruby/tester/SpecifiedGenerator.cc
index 1c273eac3..63e4a7ae8 100644
--- a/src/mem/ruby/tester/SpecifiedGenerator.cc
+++ b/src/mem/ruby/tester/SpecifiedGenerator.cc
@@ -33,10 +33,6 @@
*/
#include "mem/ruby/tester/SpecifiedGenerator.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/ruby/tester/SyntheticDriver.hh"
SpecifiedGenerator::SpecifiedGenerator()
{
diff --git a/src/mem/ruby/tester/SpecifiedGenerator.hh b/src/mem/ruby/tester/SpecifiedGenerator.hh
index 9aaaffba0..976947ce6 100644
--- a/src/mem/ruby/tester/SpecifiedGenerator.hh
+++ b/src/mem/ruby/tester/SpecifiedGenerator.hh
@@ -37,12 +37,10 @@
#ifndef SPECIFIEDGENERATOR_H
#define SPECIFIEDGENERATOR_H
-#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/NodeID.hh"
-
-class Sequencer;
-class SubBlock;
+#include "Address_Tester.hh"
class SpecifiedGenerator : public Consumer {
public:
@@ -54,7 +52,7 @@ public:
// Public Methods
virtual void wakeup() = 0;
- virtual void performCallback(NodeID proc, SubBlock& data) = 0;
+ virtual void performCallback(NodeID proc, Address address) = 0;
virtual void print(ostream& out) const = 0;
protected:
diff --git a/src/mem/ruby/tester/SyntheticDriver.cc b/src/mem/ruby/tester/SyntheticDriver.cc
deleted file mode 100644
index f19baa202..000000000
--- a/src/mem/ruby/tester/SyntheticDriver.cc
+++ /dev/null
@@ -1,286 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/tester/SyntheticDriver.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/ruby/tester/RequestGenerator.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/protocol/Chip.hh"
-
-SyntheticDriver::SyntheticDriver(RubySystem* sys_ptr)
-{
- m_finish_time = 0;
- m_done_counter = 0;
-
- m_last_progress_vector.setSize(RubyConfig::numberOfProcessors());
- for (int i=0; i<m_last_progress_vector.size(); i++) {
- m_last_progress_vector[i] = 0;
- }
-
- m_lock_vector.setSize(g_synthetic_locks);
- for (int i=0; i<m_lock_vector.size(); i++) {
- m_lock_vector[i] = -1; // No processor last held it
- }
-
- m_request_generator_vector.setSize(RubyConfig::numberOfProcessors());
- for (int i=0; i<m_request_generator_vector.size(); i++) {
- if(XACT_MEMORY){
- //m_request_generator_vector[i] = new XactRequestGenerator(i, *this);
- } else {
- m_request_generator_vector[i] = new RequestGenerator(i, *this);
- }
- }
-
- // add the tester consumer to the global event queue
- g_eventQueue_ptr->scheduleEvent(this, 1);
-}
-
-SyntheticDriver::~SyntheticDriver()
-{
- for (int i=0; i<m_last_progress_vector.size(); i++) {
- delete m_request_generator_vector[i];
- }
-}
-
-void
-SyntheticDriver::hitCallback(Packet * pkt)
-{
- NodeID proc = pkt->req->contextId();
- SubBlock data(Address(pkt->getAddr()), pkt->req->getSize());
- if (pkt->hasData()) {
- for (int i = 0; i < pkt->req->getSize(); i++) {
- data.setByte(i, *(pkt->getPtr<uint8>()+i));
- }
- }
- m_request_generator_vector[proc]->performCallback(proc, data);
- m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
-}
-
-void SyntheticDriver::abortCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
-{
- //cout << "SyntheticDriver::abortCallback" << endl;
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
-
- if(XACT_MEMORY){
- //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
- //reqGen->abortTransaction();
- //reqGen->performCallback(proc, data);
- } else {
- m_request_generator_vector[proc]->performCallback(proc, data);
- }
-
- // Mark that we made progress
- m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
-}
-
-// For Transactional Memory
-/*
-// called whenever we send a nack
-void SyntheticDriver::notifySendNack( int proc, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id ){
- if(XACT_MEMORY){
- //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
- //reqGen->notifySendNack(addr, remote_timestamp, remote_id);
- }
- else{
- cout << "notifySendNack NOT USING TM" << endl;
- ASSERT(0);
- }
-}
-
-// called whenever we receive a NACK
-// Either for a demand request or log store
-void SyntheticDriver::notifyReceiveNack( int proc, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id ){
- if(XACT_MEMORY){
- //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
- //reqGen->notifyReceiveNack(addr, remote_timestamp, remote_id);
- }
- else{
- cout << "notifyReceiveNack NOT USING TM" << endl;
- ASSERT(0);
- }
-}
-
-// called whenever we received ALL the NACKs. Take abort or retry action here
-void SyntheticDriver::notifyReceiveNackFinal(int proc, const Address & addr){
- if(XACT_MEMORY){
- //XactRequestGenerator* reqGen = static_cast<XactRequestGenerator*>(m_request_generator_vector[proc]);
- //reqGen->notifyReceiveNackFinal(addr);
- }
- else{
- ASSERT(0);
- }
-}
-
-// called during abort handling
-// void SyntheticDriver::notifyAbortStart( const Address & handlerPC ){
-
-// }
-
-// void SyntheticDriver::notifyAbortComplete( const Address & newPC ){
-
-// }
-*/
-
-Address SyntheticDriver::pickAddress(NodeID node)
-{
- // This methods picks a random lock that we were NOT that last
- // processor to acquire. Why? Without this change 2 and 4
- // processor runs, the odds of having the lock in your cache in
- // read/write state is 50% or 25%, respectively. This effect can
- // make our 'throughput per processor' results look too strange.
-
- Address addr;
- // FIXME - make this a parameter of the workload
- int lock_number = 0;
- int counter = 0;
- while (1) {
- // Pick a random lock
- lock_number = random() % m_lock_vector.size();
-
- // Were we the last to acquire the lock?
- if (m_lock_vector[lock_number] != node) {
- break;
- }
-
- // Don't keep trying forever, since if there is only one lock, we're always the last to try to obtain the lock
- counter++;
- if (counter > 10) {
- break;
- }
- }
-
- // We're going to acquire it soon, so we can update the last
- // processor to hold the lock at this time
- m_lock_vector[lock_number] = node;
-
- // One lock per cache line
- addr.setAddress(lock_number * RubyConfig::dataBlockBytes());
- return addr;
-}
-
-void SyntheticDriver::reportDone()
-{
- m_done_counter++;
- if (m_done_counter == RubyConfig::numberOfProcessors()) {
- m_finish_time = g_eventQueue_ptr->getTime();
- }
-}
-
-void SyntheticDriver::recordTestLatency(Time time)
-{
- m_test_latency.add(time);
-}
-
-void SyntheticDriver::recordSwapLatency(Time time)
-{
- m_swap_latency.add(time);
-}
-
-void SyntheticDriver::recordReleaseLatency(Time time)
-{
- m_release_latency.add(time);
-}
-
-void SyntheticDriver::wakeup()
-{
- // checkForDeadlock();
- if (m_done_counter < RubyConfig::numberOfProcessors()) {
- g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
- }
-}
-
-void SyntheticDriver::checkForDeadlock()
-{
- int size = m_last_progress_vector.size();
- Time current_time = g_eventQueue_ptr->getTime();
- for (int processor=0; processor<size; processor++) {
- if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
- WARN_EXPR(processor);
-#ifndef NDEBUG
- Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
-#endif
- assert(seq_ptr != NULL);
- // if (seq_ptr->isRequestPending()) {
- // WARN_EXPR(seq_ptr->pendingAddress());
- // }
- WARN_EXPR(current_time);
- WARN_EXPR(m_last_progress_vector[processor]);
- WARN_EXPR(current_time - m_last_progress_vector[processor]);
- ERROR_MSG("Deadlock detected.");
- }
- }
-}
-
-integer_t SyntheticDriver::readPhysicalMemory(int procID, physical_address_t address,
- int len ){
- char buffer[8];
- ASSERT(len <= 8);
- Sequencer* seq = g_system_ptr->getChip(procID/RubyConfig::numberOfProcsPerChip())->getSequencer(procID%RubyConfig::numberOfProcsPerChip());
- assert(seq != NULL);
- bool found = seq->getRubyMemoryValue(Address(address), buffer, len );
- ASSERT(found);
- return *((integer_t *) buffer);
-}
-
-void SyntheticDriver::writePhysicalMemory( int procID, physical_address_t address,
- integer_t value, int len ){
- char buffer[8];
- ASSERT(len <= 8);
-
- memcpy(buffer, (const void*) &value, len);
- DEBUG_EXPR(TESTER_COMP, MedPrio, "");
- Sequencer* seq = g_system_ptr->getChip(procID/RubyConfig::numberOfProcsPerChip())->getSequencer(procID%RubyConfig::numberOfProcsPerChip());
- assert(seq != NULL);
- bool found = seq->setRubyMemoryValue(Address(address), buffer, len );
- ASSERT(found);
- //return found;
-}
-
-void SyntheticDriver::printStats(ostream& out) const
-{
- out << endl;
- out << "SyntheticDriver Stats" << endl;
- out << "---------------------" << endl;
-
- out << "synthetic_finish_time: " << m_finish_time << endl;
- out << "test_latency: " << m_test_latency << endl;
- out << "swap_latency: " << m_swap_latency << endl;
- out << "release_latency: " << m_release_latency << endl;
-}
-
-void SyntheticDriver::print(ostream& out) const
-{
-}
diff --git a/src/mem/ruby/tester/SyntheticDriver.hh b/src/mem/ruby/tester/SyntheticDriver.hh
deleted file mode 100644
index 18c463e88..000000000
--- a/src/mem/ruby/tester/SyntheticDriver.hh
+++ /dev/null
@@ -1,118 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef SYNTHETICDRIVER_H
-#define SYNTHETICDRIVER_H
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Driver.hh"
-#include "mem/ruby/common/Histogram.hh"
-#include "mem/protocol/CacheRequestType.hh"
-
-class RubySystem;
-class RequestGenerator;
-
-class SyntheticDriver : public Driver, public Consumer {
-public:
- // Constructors
- SyntheticDriver(RubySystem* sys_ptr);
-
- // Destructor
- ~SyntheticDriver();
-
- // Public Methods
- Address pickAddress(NodeID node);
- void reportDone();
- void recordTestLatency(Time time);
- void recordSwapLatency(Time time);
- void recordReleaseLatency(Time time);
-
- void hitCallback(Packet* pkt);
- void conflictCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) {assert(0);}
- void abortCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread);
- void wakeup();
- void printStats(ostream& out) const;
- void clearStats() {}
- void printConfig(ostream& out) const {}
-
- integer_t readPhysicalMemory(int procID, physical_address_t address,
- int len );
-
- void writePhysicalMemory( int procID, physical_address_t address,
- integer_t value, int len );
-
- void print(ostream& out) const;
-
- // For handling NACKs/retries
- //void notifySendNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id);
- //void notifyReceiveNack( int procID, const Address & addr, uint64 remote_timestamp, const MachineID & remote_id);
- //void notifyReceiveNackFinal( int procID, const Address & addr);
-
-private:
- // Private Methods
- void checkForDeadlock();
-
- // Private copy constructor and assignment operator
- SyntheticDriver(const SyntheticDriver& obj);
- SyntheticDriver& operator=(const SyntheticDriver& obj);
-
- // Data Members (m_ prefix)
- Vector<Time> m_last_progress_vector;
- Vector<RequestGenerator*> m_request_generator_vector;
- Vector<NodeID> m_lock_vector; // Processor last to hold the lock
- int m_done_counter;
-
- Histogram m_test_latency;
- Histogram m_swap_latency;
- Histogram m_release_latency;
- Time m_finish_time;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const SyntheticDriver& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const SyntheticDriver& obj)
-{
- obj.print(out);
- out << flush;
- return out;
-}
-
-#endif //SYNTHETICDRIVER_H
diff --git a/src/mem/ruby/tester/Tester.cc b/src/mem/ruby/tester/Tester.cc
deleted file mode 100644
index eafc04a92..000000000
--- a/src/mem/ruby/tester/Tester.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/tester/Tester.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/ruby/common/SubBlock.hh"
-#include "mem/ruby/tester/Check.hh"
-#include "mem/protocol/Chip.hh"
-
-Tester::Tester(RubySystem* sys_ptr)
-{
- g_callback_counter = 0;
-
- // add the tester consumer to the global event queue
- g_eventQueue_ptr->scheduleEvent(this, 1);
-
- m_last_progress_vector.setSize(RubyConfig::numberOfProcessors());
- for (int i=0; i<m_last_progress_vector.size(); i++) {
- m_last_progress_vector[i] = 0;
- }
-}
-
-Tester::~Tester()
-{
-}
-
-void Tester::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread)
-{
- // Mark that we made progress
- m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
- g_callback_counter++;
-
- // This tells us our store has 'completed' or for a load gives us
- // back the data to make the check
- DEBUG_EXPR(TESTER_COMP, MedPrio, proc);
- DEBUG_EXPR(TESTER_COMP, MedPrio, data);
- Check* check_ptr = m_checkTable.getCheck(data.getAddress());
- assert(check_ptr != NULL);
- check_ptr->performCallback(proc, data);
-
-}
-
-void Tester::wakeup()
-{
- if (g_callback_counter < g_tester_length) {
- // Try to perform an action or check
- Check* check_ptr = m_checkTable.getRandomCheck();
- assert(check_ptr != NULL);
- check_ptr->initiate();
-
- checkForDeadlock();
-
- g_eventQueue_ptr->scheduleEvent(this, 2);
- }
-}
-
-void Tester::checkForDeadlock()
-{
- int size = m_last_progress_vector.size();
- Time current_time = g_eventQueue_ptr->getTime();
- for (int processor=0; processor<size; processor++) {
- if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
- WARN_EXPR(current_time);
- WARN_EXPR(m_last_progress_vector[processor]);
- WARN_EXPR(current_time - m_last_progress_vector[processor]);
- WARN_EXPR(processor);
- Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
- assert(seq_ptr != NULL);
- WARN_EXPR(*seq_ptr);
- ERROR_MSG("Deadlock detected.");
- }
- }
-}
-
-void Tester::print(ostream& out) const
-{
- out << "[Tester]" << endl;
-}
-
diff --git a/src/mem/ruby/tester/main.cc b/src/mem/ruby/tester/main.cc
index ba835b488..849206de9 100644
--- a/src/mem/ruby/tester/main.cc
+++ b/src/mem/ruby/tester/main.cc
@@ -32,10 +32,8 @@
*
*/
-#include "mem/ruby/tester/main.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/ruby/config/RubyConfig.hh"
-//#include "mem/ruby/tester/test_framework.hh"
+#include "mem/slicc/main.hh"
+#include "mem/ruby/tester/test_framework.hh"
// *******************
// *** tester main ***
@@ -43,6 +41,5 @@
int main(int argc, char *argv[])
{
- //dsm: PRUNED
- //tester_main(argc, argv);
+ tester_main(argc, argv);
}
diff --git a/src/mem/ruby/tester/main.hh b/src/mem/ruby/tester/main.hh
index 3708d770d..ca036ddd7 100644
--- a/src/mem/ruby/tester/main.hh
+++ b/src/mem/ruby/tester/main.hh
@@ -27,9 +27,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
#ifndef MAIN_H
#define MAIN_H
-#include "mem/ruby/common/Global.hh"
+#include "Global_Tester.hh"
#endif //MAIN_H
diff --git a/src/mem/ruby/tester/test_framework.cc b/src/mem/ruby/tester/test_framework.cc
index 9886adc8d..6b7c7ddec 100644
--- a/src/mem/ruby/tester/test_framework.cc
+++ b/src/mem/ruby/tester/test_framework.cc
@@ -32,181 +32,233 @@
*
*/
-#include "mem/protocol/protocol_name.hh"
+using namespace std;
+
#include "mem/ruby/tester/test_framework.hh"
-#include "mem/ruby/system/System.hh"
-#include "mem/ruby/init.hh"
-#include "mem/ruby/tester/Tester.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include "mem/protocol/protocol_name.hh"
#include "getopt.hh"
-#include "mem/ruby/network/Network.hh"
-#include "mem/ruby/recorder/CacheRecorder.hh"
-#include "mem/ruby/recorder/Tracer.hh"
+#include "mem/ruby/tester/DeterministicDriver.hh"
+#include "mem/ruby/tester/RaceyDriver.hh"
+#include "mem/ruby/tester/Driver_Tester.hh"
-using namespace std;
#include <string>
#include <map>
+#include <iostream>
+#include <assert.h>
+#include <vector>
+#include <string>
+#include <sstream>
+#include <sys/wait.h>
-// Maurice
-// extern "C" {
-// #include "simics/api.hh"
-// };
+#include "mem/ruby/libruby.hh"
-#include "mem/gems_common/ioutil/confio.hh"
-#include "mem/gems_common/ioutil/initvar.hh"
+// FIXME: this should really be a class if we can't figure out how to write a function to get the ruby parameters
-// A generated file containing the default tester parameters in string form
-// The defaults are stored in the variables
-// global_default_param and global_default_tester_param
-#include "mem/ruby/default_param.hh"
-#include "mem/ruby/tester_param.hh"
+static void set_defaults();
static void parseOptions(int argc, char **argv);
static void usageInstructions();
static void checkArg(char ch);
-static void tester_record_cache();
-static void tester_playback_trace();
static void tester_initialize(int argc, char **argv);
static void tester_destroy();
+static void hit_callback(int64_t request_id);
+
+// Tester variables
+string driver_type;
+string generator_type;
+Driver_Tester * m_driver_ptr;
+int g_tester_length;
+int num_completions;
+Time g_think_time;
+Time g_wait_time;
+int num_procs;
+
+// Debugger variables
+Debug * debug_ptr;
+string g_debug_verbosity_string;
+string g_debug_filter_string;
+string g_debug_output_filename;
+Time g_debug_start_time;
-static string trace_filename;
-char * my_default_param;
-initvar_t * my_initvar;
void tester_main(int argc, char **argv)
{
tester_initialize(argc, argv);
- if (trace_filename != "") {
- // playback a trace (for multicast-mask prediction)
- tester_playback_trace();
- } else {
- // test code to create a trace
- if (!(g_SYNTHETIC_DRIVER || g_DETERMINISTIC_DRIVER) && trace_filename == "") {
- g_system_ptr->getTracer()->startTrace("ruby.trace.gz");
- g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 10000);
- g_system_ptr->getTracer()->stopTrace();
- }
-
- g_eventQueue_ptr->triggerAllEvents();
-
- // This call is placed here to make sure the cache dump code doesn't fall victim to code rot
- if (!(g_SYNTHETIC_DRIVER || g_DETERMINISTIC_DRIVER)) {
- tester_record_cache();
- }
- }
tester_destroy();
}
-static void tester_allocate( void )
+vector<string> tokenizeMyString(string str, string delims)
{
- init_simulator();
+ vector<string> tokens;
+ char* pch;
+ char* tmp;
+ const char* c_delims = delims.c_str();
+ tmp = new char[str.length()+1];
+ strcpy(tmp, str.c_str());
+ pch = strtok(tmp, c_delims);
+ while (pch != NULL) {
+ tokens.push_back(string(pch));
+ pch = strtok(NULL, c_delims);
+ }
+ delete [] tmp;
+ return tokens;
}
-static void tester_generate_values( void )
+
+vector<string> getPorts(const char* cfg_script, int cfg_script_argc, char* cfg_script_argv[])
{
+ stringstream cfg_output;
+
+ // first we execute the Ruby-lang configuration script
+ int fd[2];
+ int pid;
+ if (pipe(fd) == -1) {
+ perror("Error Creating Pipe");
+ exit(EXIT_FAILURE);
+ }
+
+ pid = fork();
+ if (pid == -1){
+ perror("Error forking");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!pid) {
+ // child
+ close(fd[0]); // close the read end of the pipe
+ // replace stdout with the write pipe
+ if (dup2(fd[1], STDOUT_FILENO) == -1) {
+ perror("Error redirecting stdout");
+ exit(EXIT_FAILURE);
+ }
+#define QUOTE_MACRO(x, y) QUOTE_TXT(x,y)
+#define QUOTE_TXT(x, y) #x y
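+      // the two-level QUOTE macros stringize the expanded GEMS_ROOT define
+      // and splice it onto the path literal that follows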
+ if (execlp("ruby", "ruby", "-I", QUOTE_MACRO(GEMS_ROOT, "/ruby/config"), QUOTE_MACRO(GEMS_ROOT, "/tests/list_ports.rb"), cfg_script, NULL)) {
+ perror("execlp");
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ close(fd[1]);
+
+ int child_status;
+ if (wait(&child_status) == -1) {
+ perror("wait");
+ exit(EXIT_FAILURE);
+ }
+ if (child_status != EXIT_SUCCESS) {
+ exit(EXIT_FAILURE);
+ }
+
+ char buf[100];
+ int bytes_read;
+ while( (bytes_read = read(fd[0], buf, 100)) > 0 ) {
+ for (int i=0;i<bytes_read;i++) {
+ cfg_output << buf[i];
+ }
+ }
+ assert(bytes_read == 0);
+ close(fd[0]);
+ }
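+  // the first line of the script's output is a space-separated list of the
+  // available port names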
+ string line;
+ getline(cfg_output, line);
+
+ return tokenizeMyString(line, " ");
}
+
+
void tester_initialize(int argc, char **argv)
{
- int param_len = strlen( global_default_param ) + strlen( global_default_tester_param ) + 1;
- char *default_param = (char *) malloc( sizeof(char) * param_len );
- my_default_param = default_param;
- strcpy( default_param, global_default_param );
- strcat( default_param, global_default_tester_param );
-
- // when the initvar object is created, it reads the configuration default
- // -for the tester, the configuration defaults in config/tester.defaults
-
- /** note: default_param is included twice in the tester:
- * -once in init.C
- * -again in this file
- */
- initvar_t *ruby_initvar = new initvar_t( "ruby", "../../../ruby/",
- default_param,
- &tester_allocate,
- &tester_generate_values,
- NULL,
- NULL );
- my_initvar = ruby_initvar;
- ruby_initvar->checkInitialization();
+ const char* cfg_file = argv[1];
+
+ set_defaults();
parseOptions(argc, argv);
- ruby_initvar->allocate();
+ libruby_init(cfg_file);
+ libruby_print_config(std::cout);
- g_system_ptr->printConfig(cout);
- cout << "Testing clear stats...";
- g_system_ptr->clearStats();
- cout << "Done." << endl;
- //free( default_param );
- //delete ruby_initvar;
-}
+ vector<string> port_names = getPorts(cfg_file, 0, NULL);
+ vector<RubyPortHandle> ports;
-void tester_destroy()
-{
- g_system_ptr->printStats(cout);
- g_debug_ptr->closeDebugOutputFile();
+ for (vector<string>::const_iterator it = port_names.begin(); it != port_names.end(); it++)
+ ports.push_back(libruby_get_port((*it).c_str(), hit_callback));
- free(my_default_param);
- delete my_initvar;
- // Clean up
- destroy_simulator();
- cerr << "Success: " << CURRENT_PROTOCOL << endl;
-}
+ debug_ptr = new Debug( g_debug_filter_string.c_str(),
+ g_debug_verbosity_string.c_str(),
+ g_debug_start_time,
+ g_debug_output_filename.c_str() );
-void tester_install_opal(mf_opal_api_t* opal_api, mf_ruby_api_t* ruby_api)
-{
- std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
+ if (driver_type == "Deterministic") {
+ m_driver_ptr = new DeterministicDriver(generator_type, num_completions, num_procs, g_think_time, g_wait_time, g_tester_length);
+ }
+ else if (driver_type == "Racey") {
+ m_driver_ptr = new RaceyDriver(num_procs, g_tester_length);
+ }
+ /* else if (driver_type == "Synthetic") {
+ m_driver_ptr = new SyntheticDriver();
+ }
+  */
+
+ m_driver_ptr->go();
}
-void tester_record_cache()
+void tester_destroy()
{
- cout << "Testing recording of cache contents" << endl;
- CacheRecorder recorder;
- g_system_ptr->recordCacheContents(recorder);
- int written = recorder.dumpRecords("ruby.caches.gz");
- int read = Tracer::playbackTrace("ruby.caches.gz");
- assert(read == written);
- cout << "Testing recording of cache contents completed" << endl;
+ m_driver_ptr->printStats(cout);
+ libruby_destroy();
+ cerr << "Success: " << CURRENT_PROTOCOL << endl;
}
-void tester_playback_trace()
+
+void hit_callback(int64_t request_id)
{
- assert(trace_filename != "");
- cout << "Reading trace from file '" << trace_filename << "'..." << endl;
- int read = Tracer::playbackTrace(trace_filename);
- cout << "(" << read << " requests read)" << endl;
- if (read == 0) {
- ERROR_MSG("Zero items read from tracefile.");
- }
+ m_driver_ptr->hitCallback(request_id);
}
// ************************************************************************
// *** Functions for parsing the command line parameters for the tester ***
// ************************************************************************
+
+
static struct option const long_options[] =
{
{"help", no_argument, NULL, 'h'},
- {"processors", required_argument, NULL, 'p'},
- {"length", required_argument, NULL, 'l'},
- {"random", required_argument, NULL, 'r'},
- {"trace_input", required_argument, NULL, 'z'},
- {"component", required_argument, NULL, 'c'},
- {"verbosity", required_argument, NULL, 'v'},
- {"debug_output_file", required_argument, NULL, 'o'},
- {"start", required_argument, NULL, 's'},
- {"bandwidth", required_argument, NULL, 'b'},
- {"threshold", required_argument, NULL, 't'},
- {"think_time", required_argument, NULL, 'k'},
- {"locks", required_argument, NULL, 'q'},
- {"network", required_argument, NULL, 'n'},
- {"procs_per_chip", required_argument, NULL, 'a'},
- {"l2_caches", required_argument, NULL, 'e'},
- {"memories", required_argument, NULL, 'm'},
+ {"number of processors", required_argument, NULL, 'p'},
+ {"test run length", required_argument, NULL, 'l'},
+ {"debugger verbosity", required_argument, NULL, 'v'},
+ {"debugger filter component", required_argument, NULL, 'c'},
+ {"debugger output file", required_argument, NULL, 'o'},
+ {"debugger start time", required_argument, NULL, 's'},
+ {"generator think time", required_argument, NULL, 'k'},
+ {"generator wait time", required_argument, NULL, 'w'},
+ {"driver type", required_argument, NULL, 'd'},
+ {"generator type", required_argument, NULL, 'g'},
+ {"num completions before pass", required_argument, NULL, 'n'},
{NULL, 0, NULL, 0}
};
+
+// This is awkward and temporary: we need the defaults from the config file, and we also need
+// functions to look up individual parameters in it.
+// Ideally the default values would be set by libruby_init, and a separate function would be
+// provided to override values at run-time.
+static void set_defaults() {
+ g_tester_length = 0;
+ g_think_time = 5;
+ g_wait_time = 20;
+
+ num_procs = 1;
+ num_completions = 1;
+ driver_type = "Deterministic";
+ generator_type = "DetermInvGenerator";
+ g_debug_verbosity_string = "none";
+ g_debug_filter_string = "none";
+ g_debug_output_filename = "none";
+ g_debug_start_time = 0;
+}
+
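With these defaults and the long_options table below, a typical invocation of the rewritten
tester might look like the following (the binary name is hypothetical and the values are purely
illustrative; the flags are the ones registered in long_options, and the config script is
passed as the first positional argument as tester_initialize expects):

    ./ruby_tester MI_example-homogeneous.rb -p 4 -l 10000 -d Deterministic -g DetermInvGenerator -n 1 -k 5 -w 20
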
static void parseOptions(int argc, char **argv)
{
cout << "Parsing command line arguments:" << endl;
@@ -229,21 +281,14 @@ static void parseOptions(int argc, char **argv)
switch (c) {
case 0:
break;
-
- case 'c':
- checkArg(c);
- cout << " component filter string = " << optarg << endl;
- error = Debug::checkFilterString( optarg );
- if (error) {
- usageInstructions();
- }
- DEBUG_FILTER_STRING = strdup( optarg );
- break;
-
case 'h':
usageInstructions();
break;
-
+ case 'p':
+ checkArg(c);
+ cout << " number of processors = " << optarg << endl;
+ num_procs = atoi( optarg );
+ break;
case 'v':
checkArg(c);
cout << " verbosity string = " << optarg << endl;
@@ -251,22 +296,8 @@ static void parseOptions(int argc, char **argv)
if (error) {
usageInstructions();
}
- DEBUG_VERBOSITY_STRING = strdup( optarg );
- break;
-
- case 'r': {
- checkArg(c);
- if (string(optarg) == "random") {
- g_RANDOM_SEED = time(NULL);
- } else {
- g_RANDOM_SEED = atoi(optarg);
- if (g_RANDOM_SEED == 0) {
- usageInstructions();
- }
- }
+ g_debug_verbosity_string = strdup( optarg );
break;
- }
-
case 'l': {
checkArg(c);
g_tester_length = atoi(optarg);
@@ -276,44 +307,15 @@ static void parseOptions(int argc, char **argv)
}
break;
}
-
- case 'q': {
+ case 'c':
checkArg(c);
- g_synthetic_locks = atoi(optarg);
- cout << " locks in synthetic workload = " << g_synthetic_locks << endl;
- if (g_synthetic_locks == 0) {
+ cout << " component filter string = " << optarg << endl;
+ error = Debug::checkFilterString( optarg );
+ if (error) {
usageInstructions();
}
+ g_debug_filter_string = strdup( optarg );
break;
- }
-
- case 'p': {
- checkArg(c);
- g_NUM_PROCESSORS = atoi(optarg);
- break;
- }
-
- case 'a': {
- checkArg(c);
- g_PROCS_PER_CHIP = atoi(optarg);
- cout << " g_PROCS_PER_CHIP: " << g_PROCS_PER_CHIP << endl;
- break;
- }
-
- case 'e': {
- checkArg(c);
- g_NUM_L2_BANKS = atoi(optarg);
- cout << " g_NUM_L2_BANKS: " << g_NUM_L2_BANKS << endl;
- break;
- }
-
- case 'm': {
- checkArg(c);
- g_NUM_MEMORIES = atoi(optarg);
- cout << " g_NUM_MEMORIES: " << g_NUM_MEMORIES << endl;
- break;
- }
-
case 's': {
checkArg(c);
long long start_time = atoll(optarg);
@@ -321,71 +323,44 @@ static void parseOptions(int argc, char **argv)
if (start_time == 0) {
usageInstructions();
}
- DEBUG_START_TIME = start_time;
- break;
- }
-
- case 'b': {
- checkArg(c);
- int bandwidth = atoi(optarg);
- cout << " bandwidth per link (MB/sec) = " << bandwidth << endl;
- g_endpoint_bandwidth = bandwidth;
- if (bandwidth == 0) {
- usageInstructions();
- }
+ g_debug_start_time = start_time;
break;
}
-
- case 't': {
+ case 'k': {
checkArg(c);
- g_bash_bandwidth_adaptive_threshold = atof(optarg);
- if ((g_bash_bandwidth_adaptive_threshold > 1.1) || (g_bash_bandwidth_adaptive_threshold < -0.1)) {
- cerr << "Error: Bandwidth adaptive threshold must be between 0.0 and 1.0" << endl;
- usageInstructions();
- }
-
+ g_think_time = atoi(optarg);
break;
}
-
- case 'k': {
+ case 'w': {
checkArg(c);
- g_think_time = atoi(optarg);
+ g_wait_time = atoi(optarg);
break;
}
-
case 'o':
checkArg(c);
cout << " output file = " << optarg << endl;
- DEBUG_OUTPUT_FILENAME = strdup( optarg );
+ g_debug_output_filename = strdup( optarg );
break;
-
- case 'z':
+ case 'd':
checkArg(c);
- trace_filename = string(optarg);
- cout << " tracefile = " << trace_filename << endl;
+ cout << " driver type = " << optarg << endl;
+ driver_type = strdup( optarg );
+ break;
+ case 'g':
+ checkArg(c);
+ cout << " generator type = " << optarg << endl;
+ generator_type = strdup( optarg );
+ break;
+ case 'n':
+ checkArg(c);
+ cout << " num completions before pass = " << optarg << endl;
+ num_completions = atoi( optarg );
break;
-
- case 'n':
- checkArg(c);
- cout << " topology = " << string(optarg) << endl;
- g_NETWORK_TOPOLOGY = strdup(optarg);
- break;
-
default:
cerr << "parameter '" << c << "' unknown" << endl;
usageInstructions();
}
}
-
- if ((trace_filename != "") || (g_tester_length != 0)) {
- if ((trace_filename != "") && (g_tester_length != 0)) {
- cerr << "Error: both a run length (-l) and a trace file (-z) have been specified." << endl;
- usageInstructions();
- }
- } else {
- cerr << "Error: either run length (-l) must be > 0 or a trace file (-z) must be specified." << endl;
- usageInstructions();
- }
}
static void usageInstructions()
@@ -408,10 +383,8 @@ static void usageInstructions()
counter++;
}
- cerr << "Option --processors (-p) is required." << endl;
- cerr << "Either option --length (-l) or --trace_input (-z) must be specified." << endl;
cerr << endl;
- g_debug_ptr->usageInstructions();
+ debug_ptr->usageInstructions();
cerr << endl;
exit(1);
diff --git a/src/mem/ruby/tester/test_framework.hh b/src/mem/ruby/tester/test_framework.hh
index 5e9e9363b..ade27a78f 100644
--- a/src/mem/ruby/tester/test_framework.hh
+++ b/src/mem/ruby/tester/test_framework.hh
@@ -37,10 +37,8 @@
#ifndef TESTFRAMEWORK_H
#define TESTFRAMEWORK_H
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/interfaces/mf_api.hh"
+#include "mem/ruby/tester/Global_Tester.hh"
void tester_main(int argc, char **argv);
-void tester_install_opal( mf_opal_api_t *opal_api, mf_ruby_api_t *ruby_api );
#endif //TESTFRAMEWORK_H
diff --git a/src/mem/slicc/ast/ASTs.hh b/src/mem/slicc/ast/ASTs.hh
index d0ed5698f..3363fbb09 100644
--- a/src/mem/slicc/ast/ASTs.hh
+++ b/src/mem/slicc/ast/ASTs.hh
@@ -72,6 +72,7 @@
#include "mem/slicc/ast/InfixOperatorExprAST.hh"
#include "mem/slicc/ast/FuncCallExprAST.hh"
#include "mem/slicc/ast/MethodCallExprAST.hh"
+#include "mem/slicc/ast/NewExprAST.hh"
#include "mem/slicc/ast/ChipComponentAccessAST.hh"
diff --git a/src/mem/slicc/ast/EnqueueStatementAST.cc b/src/mem/slicc/ast/EnqueueStatementAST.cc
index e323e67ac..744dfe1eb 100644
--- a/src/mem/slicc/ast/EnqueueStatementAST.cc
+++ b/src/mem/slicc/ast/EnqueueStatementAST.cc
@@ -77,7 +77,7 @@ void EnqueueStatementAST::generate(string& code, Type* return_type_ptr) const
code += ".enqueue(out_msg";
if (getPairs().exist("latency")) {
- code += ", " + getPairs().lookup("latency");
+ code += ", RubyConfig::get" + getPairs().lookup("latency") + "()";
}
code += ");\n";
diff --git a/src/mem/slicc/ast/MachineAST.cc b/src/mem/slicc/ast/MachineAST.cc
index 8c2f647be..2096db591 100644
--- a/src/mem/slicc/ast/MachineAST.cc
+++ b/src/mem/slicc/ast/MachineAST.cc
@@ -41,13 +41,17 @@
MachineAST::MachineAST(string* ident_ptr,
PairListAST* pairs_ptr,
+ Vector<TypeFieldAST*>* config_params_ptr,
+ std::vector<std::string*>* latency_vector,
DeclListAST* decl_list_ptr)
: DeclAST(pairs_ptr)
{
m_ident_ptr = ident_ptr;
m_pairs_ptr = pairs_ptr;
+ m_config_params_ptr = config_params_ptr;
m_decl_list_ptr = decl_list_ptr;
+ m_latency_vector = latency_vector;
}
MachineAST::~MachineAST()
@@ -65,7 +69,7 @@ void MachineAST::generate()
g_sym_table.pushFrame();
// Create a new machine
- machine_ptr = new StateMachine(*m_ident_ptr, getLocation(), getPairs());
+ machine_ptr = new StateMachine(*m_ident_ptr, getLocation(), getPairs(), m_latency_vector);
g_sym_table.newCurrentMachine(machine_ptr);
// Generate code for all the internal decls
diff --git a/src/mem/slicc/ast/MachineAST.hh b/src/mem/slicc/ast/MachineAST.hh
index 037803db5..8f83e4cfe 100644
--- a/src/mem/slicc/ast/MachineAST.hh
+++ b/src/mem/slicc/ast/MachineAST.hh
@@ -42,6 +42,7 @@
#include "mem/slicc/slicc_global.hh"
#include "mem/slicc/ast/DeclAST.hh"
#include "mem/slicc/ast/DeclListAST.hh"
+#include "mem/slicc/ast/TypeFieldAST.hh"
#include "mem/slicc/symbols/StateMachine.hh"
class MachineAST : public DeclAST {
@@ -49,6 +50,8 @@ public:
// Constructors
MachineAST(string* ident_ptr,
PairListAST* pairs_ptr,
+ Vector<TypeFieldAST*>* config_params_ptr,
+ std::vector<std::string*>* latency_vector,
DeclListAST* decl_list_ptr);
// Destructor
@@ -66,8 +69,10 @@ private:
MachineAST& operator=(const MachineAST& obj);
// Data Members (m_ prefix)
+ std::vector<std::string*>* m_latency_vector;
string* m_ident_ptr;
DeclListAST* m_decl_list_ptr;
+ Vector<TypeFieldAST*>* m_config_params_ptr;
PairListAST* m_pairs_ptr;
};
diff --git a/src/mem/slicc/ast/MethodCallExprAST.cc b/src/mem/slicc/ast/MethodCallExprAST.cc
index 0e2fed769..1bfe312ff 100644
--- a/src/mem/slicc/ast/MethodCallExprAST.cc
+++ b/src/mem/slicc/ast/MethodCallExprAST.cc
@@ -76,25 +76,38 @@ Type* MethodCallExprAST::generate(string& code) const
{
Type* obj_type_ptr = NULL;
+ string methodId;
+ Vector <Type*> paramTypes;
+
+ int actual_size = m_expr_vec_ptr->size();
+ for(int i=0; i<actual_size; i++) {
+ string tmp;
+ Type* actual_type_ptr = (*m_expr_vec_ptr)[i]->generate(tmp);
+ paramTypes.insertAtBottom(actual_type_ptr);
+ }
+
if(m_obj_expr_ptr) {
// member method call
+ string tmp;
+ obj_type_ptr = m_obj_expr_ptr->generate(tmp);
+ methodId = obj_type_ptr->methodId(*m_proc_name_ptr, paramTypes);
+ if (obj_type_ptr->methodReturnType(methodId)->isInterface())
+ code += "static_cast<" + obj_type_ptr->methodReturnType(methodId)->cIdent() + "&>";
code += "((";
- obj_type_ptr = m_obj_expr_ptr->generate(code);
-
+ code += tmp;
code += ").";
} else if (m_type_ptr) {
// class method call
code += "(" + m_type_ptr->toString() + "::";
obj_type_ptr = m_type_ptr->lookupType();
+ methodId = obj_type_ptr->methodId(*m_proc_name_ptr, paramTypes);
} else {
// impossible
assert(0);
}
- Vector <Type*> paramTypes;
-
// generate code
- int actual_size = m_expr_vec_ptr->size();
+ actual_size = m_expr_vec_ptr->size();
code += (*m_proc_name_ptr) + "(";
for(int i=0; i<actual_size; i++) {
if (i != 0) {
@@ -102,12 +115,9 @@ Type* MethodCallExprAST::generate(string& code) const
}
// Check the types of the parameter
Type* actual_type_ptr = (*m_expr_vec_ptr)[i]->generate(code);
- paramTypes.insertAtBottom(actual_type_ptr);
}
code += "))";
- string methodId = obj_type_ptr->methodId(*m_proc_name_ptr, paramTypes);
-
// Verify that this is a method of the object
if (!obj_type_ptr->methodExist(methodId)) {
error("Invalid method call: Type '" + obj_type_ptr->toString() + "' does not have a method '" + methodId + "'");
diff --git a/src/mem/slicc/ast/NewExprAST.cc b/src/mem/slicc/ast/NewExprAST.cc
new file mode 100644
index 000000000..95e57192f
--- /dev/null
+++ b/src/mem/slicc/ast/NewExprAST.cc
@@ -0,0 +1,9 @@
+
+#include "mem/slicc/ast/NewExprAST.hh"
+
+Type* NewExprAST::generate(string & code) const
+{
+ Type* type = m_type_ptr->lookupType();
+ code += "new " + type->cIdent();
+ return type;
+}
diff --git a/src/mem/slicc/ast/NewExprAST.hh b/src/mem/slicc/ast/NewExprAST.hh
new file mode 100644
index 000000000..375f130d6
--- /dev/null
+++ b/src/mem/slicc/ast/NewExprAST.hh
@@ -0,0 +1,20 @@
+#ifndef NEWEXPRAST_H
+#define NEWEXPRAST_H
+
+#include "mem/slicc/ast/ExprAST.hh"
+#include "mem/slicc/ast/TypeAST.hh"
+#include "mem/slicc/symbols/Type.hh"
+
+class NewExprAST : public ExprAST
+{
+public:
+ NewExprAST(TypeAST* type_ptr) : ExprAST() { m_type_ptr = type_ptr; }
+ Type* generate(string & code) const;
+ void print(ostream & out) const { out << "[NewExprAST: " << *m_type_ptr << "]"; }
+ string getName() const { return m_type_ptr->toString(); }
+
+private:
+ TypeAST* m_type_ptr;
+};
+
+#endif
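
A minimal sketch of the effect, assuming a SLICC type whose C identifier is "TBETable" (the
type name is hypothetical): the SLICC expression "new TBETable" causes NewExprAST::generate()
to append the C++ text "new TBETable" to the generated code and to return the looked-up Type,
so type checking of the enclosing expression proceeds as usual.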
diff --git a/src/mem/slicc/ast/ObjDeclAST.cc b/src/mem/slicc/ast/ObjDeclAST.cc
index f9349f9de..3569395db 100644
--- a/src/mem/slicc/ast/ObjDeclAST.cc
+++ b/src/mem/slicc/ast/ObjDeclAST.cc
@@ -108,32 +108,21 @@ void ObjDeclAST::generate()
c_code = "m_version";
} else if (*m_ident_ptr == "machineID") {
c_code = "m_machineID";
- } else if (*m_ident_ptr == "sequencer") {
- c_code = "*(dynamic_cast<"+m_type_ptr->toString()+"*>(m_chip_ptr->getSequencer(m_version)))";
- machineComponentSym = true;
- } /*else if (*m_ident_ptr == "xfdr_record_mgr") {
- c_code = "*(dynamic_cast<"+m_type_ptr->toString()+"*>(m_chip_ptr->getXfdrManager(m_version)))";
- machineComponentSym = true;
- } */else if (// getPairs().exist("network") || (m_type_ptr->lookupType()->existPair("cache"))
-// || (m_type_ptr->lookupType()->existPair("tbe")) ||
-// (m_type_ptr->lookupType()->existPair("newtbe")) ||
-// (m_type_ptr->lookupType()->existPair("timer")) ||
-// (m_type_ptr->lookupType()->existPair("dir")) ||
-// (m_type_ptr->lookupType()->existPair("persistent")) ||
-// (m_type_ptr->lookupType()->existPair("filter")) ||
-// (getPairs().exist("trigger_queue"))
- getPairs().exist("no_vector")) {
- c_code = "(*(m_chip_ptr->m_" + machine + *m_ident_ptr + "_ptr))";
- machineComponentSym = true;
} else {
- c_code = "(*(m_chip_ptr->m_" + machine + *m_ident_ptr + "_vec[m_version]))";
- machineComponentSym = true;
+ c_code = "(*m_" + machine + *m_ident_ptr + "_ptr)";
+ // c_code = "(*(m_chip_ptr->m_" + machine + *m_ident_ptr + "_ptr))";
+ // machineComponentSym = true;
}
Var* v = new Var(*m_ident_ptr, getLocation(), type_ptr, c_code,
getPairs(), g_sym_table.getStateMachine());
- g_sym_table.newSym(v);
+ StateMachine* machine_ptr = g_sym_table.getStateMachine();
+ if (machine_ptr != NULL) {
+ machine_ptr->addObj(v);
+ }// else {
+ g_sym_table.newSym(v);
+ //}
// used to cheat-- that is, access components in other machines
if (machineComponentSym) {
diff --git a/src/mem/slicc/parser/lexer.ll b/src/mem/slicc/parser/lexer.ll
index a4af2ac51..b2d36855b 100644
--- a/src/mem/slicc/parser/lexer.ll
+++ b/src/mem/slicc/parser/lexer.ll
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -26,11 +27,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * $Id$
+ */
+
%{
#include <assert.h>
#include "mem/slicc/ast/ASTs.hh"
-#include "mem/slicc/parser/parser.hh"
+#include "parser.hh"
#include <string>
extern "C" int yylex();
@@ -76,6 +81,7 @@ return { return RETURN; }
THIS { return THIS; }
CHIP { return CHIP; }
void { yylval.str_ptr = new string(yytext); return VOID; }
+new { return NEW; }
== { yylval.str_ptr = new string(yytext); return EQ; }
!= { yylval.str_ptr = new string(yytext); return NE; }
@@ -108,6 +114,7 @@ void { yylval.str_ptr = new string(yytext); return VOID; }
[0-9]*[.][0-9]* { yylval.str_ptr = new string(yytext); return FLOATNUMBER; }
[0-9]* { yylval.str_ptr = new string(yytext); return NUMBER; }
+
[a-zA-Z_][a-zA-Z_0-9]{0,50} { yylval.str_ptr = new string(yytext); return IDENT; }
\"[^"\n]*\" { yytext[strlen(yytext)-1] = '\0'; yylval.str_ptr = new string(yytext+1); return STRING; }
\'[^'\n]*\' { yytext[strlen(yytext)-1] = '\0'; yylval.str_ptr = new string(yytext+1); return STRING; }
diff --git a/src/mem/slicc/parser/parser.yy b/src/mem/slicc/parser/parser.yy
index 724184665..81cbec9c2 100644
--- a/src/mem/slicc/parser/parser.yy
+++ b/src/mem/slicc/parser/parser.yy
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -26,11 +27,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * $Id$
+ *
+ * */
+
%{
#include <string>
#include <stdio.h>
#include <assert.h>
#include "mem/slicc/ast/ASTs.hh"
+#include <vector>
#define YYMAXDEPTH 100000
#define YYERROR_VERBOSE
@@ -45,6 +52,7 @@ extern "C" int yylex();
%union {
string* str_ptr;
Vector<string>* string_vector_ptr;
+ std::vector<string*>* stdstring_vector_ptr;
// Decls
DeclAST* decl_ptr;
@@ -103,6 +111,8 @@ extern "C" int yylex();
%type <expr_ptr> expr literal enumeration
%type <expr_vector_ptr> expr_list
+%type <stdstring_vector_ptr> myrule
+
%type <pair_ptr> pair
%type <pair_list_ptr> pair_list pairs
@@ -115,7 +125,7 @@ extern "C" int yylex();
//%token DEQUEUE REMOVE_EARLY SKIP_EARLY PEEK_EARLY
%token DEBUG_EXPR_TOKEN DEBUG_MSG_TOKEN
%token ACTION_DECL TRANSITION_DECL TYPE_DECL STRUCT_DECL EXTERN_TYPE_DECL ENUM_DECL
-%token TYPE_FIELD OTHER IF ELSE RETURN
+%token TYPE_FIELD OTHER IF ELSE RETURN NEW
%token <str_ptr> EQ NE '<' '>' LE GE NOT AND OR PLUS DASH STAR SLASH RIGHTSHIFT LEFTSHIFT
@@ -138,7 +148,9 @@ decls: decl decls { $2->insertAtTop($1); $$ = $2; }
| { $$ = new Vector<DeclAST*>; }
;
-decl: MACHINE_DECL '(' ident pair_list ')' '{' decl_list '}' { $$ = new MachineAST($3, $4, $7); }
+decl: MACHINE_DECL '(' ident pair_list ')' ':' myrule '{' decl_list '}' { $$ = new MachineAST($3, $4, NULL, $7, $9); }
+// | MACHINE_DECL '(' ident pair_list ')' ':' type_members '{' decl_list '}' { $$ = new MachineAST($3, $4, $7, string_vector, $9); }
+ | MACHINE_DECL '(' ident pair_list ')' '{' decl_list '}' { $$ = new MachineAST($3, $4, NULL, new vector<string*>(), $7); }
| ACTION_DECL '(' ident pair_list ')' statement_list { $$ = new ActionDeclAST($3, $4, $6); }
| IN_PORT_DECL '(' ident ',' type ',' var pair_list ')' statement_list { $$ = new InPortDeclAST($3, $5, $7, $8, $10); }
| OUT_PORT_DECL '(' ident ',' type ',' var pair_list ')' SEMICOLON { $$ = new OutPortDeclAST($3, $5, $7, $8); }
@@ -214,7 +226,7 @@ formal_param : type ident { $$ = new FormalParamAST($1, $2); }
;
// Idents and lists
-ident: IDENT { $$ = $1; } ;
+ident: IDENT { $$ = $1; };
ident_list: '{' idents '}' { $$ = $2; }
| ident { $$ = new Vector<string>; $$->insertAtTop(*($1)); delete $1; }
@@ -274,7 +286,7 @@ expr: var { $$ = $1; }
| literal { $$ = $1; }
| enumeration { $$ = $1; }
| ident '(' expr_list ')' { $$ = new FuncCallExprAST($1, $3); }
-
+ | NEW type { $$ = new NewExprAST($2); }
// globally access a local chip component and call a method
| THIS DOT var '[' expr ']' DOT var DOT ident '(' expr_list ')' { $$ = new ChipComponentAccessAST($3, $5, $8, $10, $12 ); }
@@ -324,6 +336,10 @@ var: ident { $$ = new VarExprAST($1); }
field: ident { $$ = $1; }
;
+myrule: myrule IDENT { $1->push_back($2); }
+ | IDENT { $$ = new vector<string*>(1, $1); }
+ ;
+
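Under the extended grammar, a machine declaration can now list identifiers after a colon; a
hedged sketch of the new SLICC form (machine and parameter names are illustrative, but the
LATENCY_ prefix is the one the code generator later strips and lowercases):

    machine(Directory, "Directory protocol")
      : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_DIRECTORY_LATENCY
    {
      ...
    }
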
%%
extern FILE *yyin;
@@ -337,7 +353,7 @@ DeclListAST* parse(string filename)
exit(1);
}
g_line_number = 1;
- g_file_name() = filename;
+ g_file_name = filename;
yyin = file;
g_decl_list_ptr = NULL;
yyparse();
@@ -346,7 +362,7 @@ DeclListAST* parse(string filename)
extern "C" void yyerror(char* s)
{
- fprintf(stderr, "%s:%d: %s at %s\n", g_file_name().c_str(), g_line_number, s, yytext);
+ fprintf(stderr, "%s:%d: %s at %s\n", g_file_name.c_str(), g_line_number, s, yytext);
exit(1);
}
diff --git a/src/mem/slicc/symbols/Func.cc b/src/mem/slicc/symbols/Func.cc
index 6bc763300..d29138b38 100644
--- a/src/mem/slicc/symbols/Func.cc
+++ b/src/mem/slicc/symbols/Func.cc
@@ -88,7 +88,7 @@ void Func::funcPrototype(string& code) const
}
// This write a function of object Chip
-void Func::writeCFiles(string path) const
+void Func::writeCFiles(string path)
{
if (isExternal()) {
// Do nothing
@@ -99,9 +99,8 @@ void Func::writeCFiles(string path) const
out << "/** Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< " */" << endl;
out << endl;
out << "#include \"mem/protocol/Types.hh\"" << endl;
- out << "#include \"mem/protocol/Chip.hh\"" << endl;
if (m_isInternalMachineFunc) {
- out << "#include \"" << m_machineStr << "_Controller.hh\"" << endl;
+ out << "#include \"mem/protocol/" << m_machineStr << "_Controller.hh\"" << endl;
}
out << endl;
diff --git a/src/mem/slicc/symbols/Func.hh b/src/mem/slicc/symbols/Func.hh
index 6ceaba378..8f8548bb0 100644
--- a/src/mem/slicc/symbols/Func.hh
+++ b/src/mem/slicc/symbols/Func.hh
@@ -57,7 +57,7 @@ public:
string cIdent() const { return m_c_ident; }
const Vector<Type*>& getParamTypes() const { return m_param_type_vec; }
Type* getReturnType() const { return m_type_ptr; }
- void writeCFiles(string path) const;
+ void writeCFiles(string path) ;
void funcPrototype(string& code) const;
bool isExternal() const { return existPair("external"); }
bool isInternalMachineFunc() const { return m_isInternalMachineFunc; }
diff --git a/src/mem/slicc/symbols/StateMachine.cc b/src/mem/slicc/symbols/StateMachine.cc
index 6aaa0ebca..f2a40d3d7 100644
--- a/src/mem/slicc/symbols/StateMachine.cc
+++ b/src/mem/slicc/symbols/StateMachine.cc
@@ -44,10 +44,13 @@
#include "mem/gems_common/util.hh"
#include "mem/gems_common/Vector.hh"
-StateMachine::StateMachine(string ident, const Location& location, const Map<string, string>& pairs)
+#include <set>
+
+StateMachine::StateMachine(string ident, const Location& location, const Map<string, string>& pairs, std::vector<std::string*>* latency_vector)
: Symbol(ident, location, pairs)
{
m_table_built = false;
+ m_latency_vector = *latency_vector;
}
StateMachine::~StateMachine()
@@ -167,11 +170,18 @@ const Transition* StateMachine::getTransPtr(int stateIndex, int eventIndex) cons
// ******* C Files ******* //
// *********************** //
-void StateMachine::writeCFiles(string path) const
+void StateMachine::writeCFiles(string path)
{
string comp = getIdent();
string filename;
+ // Output the method declarations for the class declaration
+ {
+ ostringstream sstr;
+ printControllerH(sstr, comp);
+ conditionally_write_file(path + comp + "_Controller.hh", sstr);
+ }
+
// Output switch statement for transition table
{
ostringstream sstr;
@@ -186,13 +196,6 @@ void StateMachine::writeCFiles(string path) const
conditionally_write_file(path + comp + "_Controller.cc", sstr);
}
- // Output the method declarations for the class declaration
- {
- ostringstream sstr;
- printControllerH(sstr, comp);
- conditionally_write_file(path + comp + "_Controller.hh", sstr);
- }
-
// Output the wakeup loop for the events
{
ostringstream sstr;
@@ -219,8 +222,11 @@ void StateMachine::writeCFiles(string path) const
}
-void StateMachine::printControllerH(ostream& out, string component) const
+void StateMachine::printControllerH(ostream& out, string component)
{
+
+ m_message_buffer_names.clear();
+
out << "/** \\file " << getIdent() << ".hh" << endl;
out << " * " << endl;
out << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
@@ -232,29 +238,59 @@ void StateMachine::printControllerH(ostream& out, string component) const
out << endl;
out << "#include \"mem/ruby/common/Global.hh\"" << endl;
out << "#include \"mem/ruby/common/Consumer.hh\"" << endl;
+ out << "#include \"mem/ruby/slicc_interface/AbstractController.hh\"" << endl;
out << "#include \"mem/protocol/TransitionResult.hh\"" << endl;
out << "#include \"mem/protocol/Types.hh\"" << endl;
out << "#include \"mem/protocol/" << component << "_Profiler.hh\"" << endl;
+
+ // include object classes
+ std::set<string> seen_types;
+ for(int i=0; i<numObjects(); i++) {
+ Var* var = m_objs[i];
+ if (seen_types.count(var->getType()->cIdent()) == 0) {
+ out << "#include \"mem/protocol/" << var->getType()->cIdent() << ".hh\"" << endl;
+ // out << "class " << var->getType()->cIdent() << ";" << endl;
+ seen_types.insert(var->getType()->cIdent());
+ }
+ }
+
out << endl;
// for adding information to the protocol debug trace
out << "extern stringstream " << component << "_" << "transitionComment;" << endl;
- out << "class " << component << "_Controller : public Consumer {" << endl;
+ out << "class " << component << "_Controller : public AbstractController {" << endl;
/* the coherence checker needs to call isBlockExclusive() and isBlockShared()
making the Chip a friend class is an easy way to do this for now */
out << "#ifdef CHECK_COHERENCE" << endl;
- out << " friend class Chip;" << endl;
out << "#endif /* CHECK_COHERENCE */" << endl;
out << "public:" << endl;
- out << " " << component << "_Controller(Chip* chip_ptr, int version);" << endl;
+ // out << " " << component << "_Controller(int version, Network* net_ptr);" << endl;
+ out << " " << component << "_Controller(const string & name);" << endl;
+ out << " static int getNumControllers();" << endl;
+ out << " void init(Network* net_ptr, const vector<string> & argv);" << endl;
+ out << " MessageBuffer* getMandatoryQueue() const;" << endl;
+ out << " const int & getVersion() const;" << endl;
+ out << " const string toString() const;" << endl;
+ out << " const string getName() const;" << endl;
+ out << " const MachineType getMachineType() const;" << endl;
out << " void print(ostream& out) const;" << endl;
+ out << " void printConfig(ostream& out) const;" << endl;
out << " void wakeup();" << endl;
- out << " static void dumpStats(ostream& out) { s_profiler.dumpStats(out); }" << endl;
- out << " static void clearStats() { s_profiler.clearStats(); }" << endl;
+ out << " void printStats(ostream& out) const { s_profiler.dumpStats(out); }" << endl;
+ out << " void clearStats() { s_profiler.clearStats(); }" << endl;
out << "private:" << endl;
+
+//added by SS
+// found_to_mem = 0;
+ std::vector<std::string*>::const_iterator it;
+ for(it=m_latency_vector.begin();it!=m_latency_vector.end();it++){
+ out << " int m_" << (*it)->c_str() << ";" << endl;
+ }
+ out << " int m_number_of_TBEs;" << endl;
+
out << " TransitionResult doTransition(" << component << "_Event event, " << component
<< "_State state, const Address& addr";
if(CHECK_INVALID_RESOURCE_STALLS) {
@@ -267,11 +303,16 @@ void StateMachine::printControllerH(ostream& out, string component) const
out << ", int priority";
}
out << "); // in " << component << "_Transitions.cc" << endl;
- out << " Chip* m_chip_ptr;" << endl;
- out << " NodeID m_id;" << endl;
+ out << " string m_name;" << endl;
+ out << " int m_transitions_per_cycle;" << endl;
+ out << " int m_buffer_size;" << endl;
+ out << " int m_recycle_latency;" << endl;
+ out << " map< string, string > m_cfg;" << endl;
out << " NodeID m_version;" << endl;
+ out << " Network* m_net_ptr;" << endl;
out << " MachineID m_machineID;" << endl;
out << " static " << component << "_Profiler s_profiler;" << endl;
+ out << " static int m_num_controllers;" << endl;
// internal function protypes
out << " // Internal functions" << endl;
@@ -290,11 +331,30 @@ void StateMachine::printControllerH(ostream& out, string component) const
out << "/** \\brief " << action.getDescription() << "*/" << endl;
out << " void " << action.getIdent() << "(const Address& addr);" << endl;
}
+
+ // the controller internal variables
+ out << " // Object" << endl;
+ for(int i=0; i < numObjects(); i++) {
+ const Var* var = m_objs[i];
+ string template_hack = "";
+ if (var->existPair("template_hack")) {
+ template_hack = var->lookupPair("template_hack");
+ }
+ out << " " << var->getType()->cIdent() << template_hack << "* m_"
+ << var->cIdent() << "_ptr;" << endl;
+
+ string str = "m_"+ var->cIdent() + "_ptr";
+ if (var->getType()->cIdent() == "MessageBuffer")
+ m_message_buffer_names.push_back(str);
+
+ }
+
+
out << "};" << endl;
out << "#endif // " << component << "_CONTROLLER_H" << endl;
}
-void StateMachine::printControllerC(ostream& out, string component) const
+void StateMachine::printControllerC(ostream& out, string component)
{
out << "/** \\file " << getIdent() << ".cc" << endl;
out << " * " << endl;
@@ -309,9 +369,22 @@ void StateMachine::printControllerC(ostream& out, string component) const
out << "#include \"mem/protocol/" << component << "_Event.hh\"" << endl;
out << "#include \"mem/protocol/Types.hh\"" << endl;
out << "#include \"mem/ruby/system/System.hh\"" << endl;
- out << "#include \"mem/protocol/Chip.hh\"" << endl;
+
+ // include object classes
+ std::set<string> seen_types;
+ for(int i=0; i<numObjects(); i++) {
+ Var* var = m_objs[i];
+ if (seen_types.count(var->getType()->cIdent()) == 0) {
+ out << "#include \"mem/protocol/" << var->getType()->cIdent() << ".hh\"" << endl;
+ seen_types.insert(var->getType()->cIdent());
+ }
+
+ }
+
out << endl;
+ out << "int " << component << "_Controller::m_num_controllers = 0;" << endl;
+
// for adding information to the protocol debug trace
out << "stringstream " << component << "_" << "transitionComment;" << endl;
out << "#define APPEND_TRANSITION_COMMENT(str) (" << component << "_" << "transitionComment << str)" << endl;
@@ -322,26 +395,220 @@ void StateMachine::printControllerC(ostream& out, string component) const
out << "/** \\brief constructor */" << endl;
out << component << "_Controller::" << component
- << "_Controller(Chip* chip_ptr, int version)" << endl;
+ // << "_Controller(int version, Network* net_ptr)" << endl;
+ << "_Controller(const string & name)" << endl;
+ out << " : m_name(name)" << endl;
+ out << "{ " << endl;
+ out << " m_num_controllers++; " << endl;
+ for(int i=0; i < numObjects(); i++) {
+ const Var* var = m_objs[i];
+ if ( var->cIdent().find("mandatoryQueue") != string::npos)
+ out << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << "();" << endl;
+ }
+ out << "}" << endl << endl;
+
+ out << "void " << component << "_Controller::init(Network * net_ptr, const vector<string> & argv)" << endl;
out << "{" << endl;
- out << " m_chip_ptr = chip_ptr;" << endl;
- out << " m_id = m_chip_ptr->getID();" << endl;
- out << " m_version = version;" << endl;
+ out << " for (size_t i=0; i < argv.size(); i+=2) {" << endl;
+// out << " printf (\"ARG: %s = %s \\n \", argv[i].c_str(), argv[i+1].c_str());"<< endl;
+
+ out << " if (argv[i] == \"version\") " << endl;
+ out << " m_version = atoi(argv[i+1].c_str());" << endl;
+ out << " else if (argv[i] == \"transitions_per_cycle\") " << endl;
+ out << " m_transitions_per_cycle = atoi(argv[i+1].c_str());" << endl;
+ out << " else if (argv[i] == \"buffer_size\") " << endl;
+ out << " m_buffer_size = atoi(argv[i+1].c_str());" << endl;
+//added by SS
+ out << " else if (argv[i] == \"recycle_latency\") " << endl;
+ out << " m_recycle_latency = atoi(argv[i+1].c_str());" << endl;
+//added by SS --> for latency
+//for loop on latency_vector to check with argv[i] and assign the value to the related m_latency ...
+ out << " else if (argv[i] == \"number_of_TBEs\") " << endl;
+ out << " m_number_of_TBEs = atoi(argv[i+1].c_str());" << endl;
+
+ if (m_latency_vector.size()) {
+ out << " else { " << endl;
+ std::vector<std::string*>::const_iterator it;
+ for(it=m_latency_vector.begin();it!=m_latency_vector.end();it++) {
+ string str = (*it)->c_str();
+ str.erase(0,8);
+//convert to lowercase
+ size_t i;
+ char* strc = (char*) malloc (str.length()+1);
+ strc[str.length()]=0;
+ for(i=0; i < str.length(); i++) {
+ strc[i] = str.at(i);
+ strc[i] = tolower(strc[i]);
+ }
+ str = strc;
+      free(strc);
+ out << " if (argv[i] == \"" << str << "\"){" << endl;
+ if (str == "to_mem_ctrl_latency")
+ out << " m_" << (*it)->c_str() << "=" << "atoi(argv[i+1].c_str())+(random() % 5);" << endl;
+ else
+ out << " m_" << (*it)->c_str() << "=" << "atoi(argv[i+1].c_str());" << endl;
+// out << " printf (\"SET m_" << it->c_str() << "= %i \\n \", m_" << it->c_str() << ");" << endl;
+ out << " }" << endl;
+ }
+ out << " }" << endl;
+ }
+ out << " }" << endl;
+
+ out << " m_net_ptr = net_ptr;" << endl;
out << " m_machineID.type = MachineType_" << component << ";" << endl;
- out << " m_machineID.num = m_id*RubyConfig::numberOf"<< component << "PerChip()+m_version;" << endl;
+ out << " m_machineID.num = m_version;" << endl;
+
+// out << " printf (\"I set m_LATENCY_ISSUE_LATENCY to %i \\n \", m_LATENCY_ISSUE_LATENCY);" << endl;
+// out << " printf (\"I set m_LATENCY_CACHE_RESPONSE_LATENCY to %i \\n \", m_LATENCY_CACHE_RESPONSE_LATENCY);" << endl;
+
+ // make configuration array
+ out << " for (size_t i=0; i < argv.size(); i+=2) {" << endl;
+ out << " if (argv[i] != \"version\") " << endl;
+ out << " m_cfg[argv[i]] = argv[i+1];" << endl;
+ out << " }" << endl;
+
+ out << endl;
+
+ // initialize objects
+ out << " // Objects" << endl;
+ for(int i=0; i < numObjects(); i++) {
+ const Var* var = m_objs[i];
+ if (!var->existPair("network")) {
+ // Not a network port object
+ if (var->getType()->existPair("primitive")) {
+ out << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << ";\n";
+ if (var->existPair("default")) {
+ out << " (*m_" << var->cIdent() << "_ptr) = " << var->lookupPair("default") << ";\n";
+ }
+ out << " }\n";
+
+ } else {
+ // Normal Object
+ string template_hack = "";
+ if (var->existPair("template_hack")) {
+ template_hack = var->lookupPair("template_hack");
+ }
+//added by SS
+ string str = "";
+ int found = 0;
+ if (var->existPair("factory")) {
+ out << " m_" << var->cIdent() << "_ptr = " << var->lookupPair("factory");
+ } else {
+ if ( var->cIdent().find("mandatoryQueue") == string::npos) {
+
+ str = " m_" + var->cIdent() + "_ptr = new " + var->getType()->cIdent() + template_hack;
+ out << str;
+ if (str.find("TBETable")!=string::npos){
+ found = 1;
+ }
+
+ if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
+ str = "";
+ if (var->existPair("constructor_hack")) {
+ string constructor_hack = var->lookupPair("constructor_hack");
+ str = "(" + constructor_hack + ")";
+ } else {
+ str = "()";
+ }
+ if (found)
+ str = "(m_number_of_TBEs)";
+ out << str;
+ }
+ }
+ }
+
+ out << ";\n";
+ out << " assert(m_" << var->cIdent() << "_ptr != NULL);" << endl;
+
+ if (var->existPair("default")) {
+ out << " (*m_" << var->cIdent() << "_ptr) = " << var->lookupPair("default")
+ << "; // Object default" << endl;
+ } else if (var->getType()->hasDefault()) {
+ out << " (*m_" << var->cIdent() << "_ptr) = " << var->getType()->getDefault()
+ << "; // Type " << var->getType()->getIdent() << " default" << endl;
+ }
+
+ // Set ordering
+ if (var->existPair("ordered") && !var->existPair("trigger_queue")) {
+ // A buffer
+ string ordered = var->lookupPair("ordered");
+ out << " m_" << var->cIdent() << "_ptr->setOrdering(" << ordered << ");\n";
+ }
+
+ // Set randomization
+ if (var->existPair("random")) {
+ // A buffer
+ string value = var->lookupPair("random");
+ out << " m_" << var->cIdent() << "_ptr->setRandomization(" << value << ");\n";
+ }
+
+ // Set Priority
+ if (var->getType()->isBuffer() && var->existPair("rank") && !var->existPair("trigger_queue")) {
+ string rank = var->lookupPair("rank");
+ out << " m_" << var->cIdent() << "_ptr->setPriority(" << rank << ");\n";
+ }
+ }
+ } else {
+ // Network port object
+ string network = var->lookupPair("network");
+ string ordered = var->lookupPair("ordered");
+ string vnet = var->lookupPair("virtual_network");
+
+ assert (var->getMachine() != NULL);
+ out << " m_" << var->cIdent() << "_ptr = m_net_ptr->get"
+ << network << "NetQueue(m_version+MachineType_base_number(string_to_MachineType(\""
+ << var->getMachine()->getIdent() << "\")), "
+ << ordered << ", " << vnet << ");\n";
+ out << " assert(m_" << var->cIdent() << "_ptr != NULL);" << endl;
+
+ // Set ordering
+ if (var->existPair("ordered")) {
+ // A buffer
+ string ordered = var->lookupPair("ordered");
+ out << " m_" << var->cIdent() << "_ptr->setOrdering(" << ordered << ");\n";
+ }
+
+ // Set randomization
+ if (var->existPair("random")) {
+ // A buffer
+ string value = var->lookupPair("random");
+ out << " m_" << var->cIdent() << "_ptr->setRandomization(" << value << ");\n";
+ }
+
+ // Set Priority
+ if (var->existPair("rank")) {
+ string rank = var->lookupPair("rank");
+ out << " m_" << var->cIdent() << "_ptr->setPriority(" << rank << ");\n";
+ }
+
+ // Set buffer size
+ if (var->getType()->isBuffer()) {
+ out << " if (m_buffer_size > 0) {\n";
+ out << " m_" << var->cIdent() << "_ptr->setSize(m_buffer_size);\n";
+ out << " }\n";
+ }
+
+      // set description (may be overridden later by port def)
+ out << " m_" << var->cIdent()
+ << "_ptr->setDescription(\"[Version \" + int_to_string(m_version) + \", "
+ << component << ", name=" << var->cIdent() << "]\");" << endl;
+ out << endl;
+ }
+ }
// Set the queue consumers
+ out << endl;
for(int i=0; i < m_in_ports.size(); i++) {
const Var* port = m_in_ports[i];
out << " " << port->getCode() << ".setConsumer(this);" << endl;
}
- out << endl;
// Set the queue descriptions
+ out << endl;
for(int i=0; i < m_in_ports.size(); i++) {
const Var* port = m_in_ports[i];
out << " " << port->getCode()
- << ".setDescription(\"[Chip \" + int_to_string(m_chip_ptr->getID()) + \" \" + int_to_string(m_version) + \", "
+ << ".setDescription(\"[Version \" + int_to_string(m_version) + \", "
<< component << ", " << port->toString() << "]\");" << endl;
}
@@ -368,12 +635,72 @@ void StateMachine::printControllerC(ostream& out, string component) const
}
}
+ //added by SS to initialize recycle_latency of message buffers
+ std::vector<std::string>::const_iterator it;
+ for ( it=m_message_buffer_names.begin() ; it != m_message_buffer_names.end(); it++ ){
+ out << " "<< (*it).c_str() << "->setRecycleLatency(m_recycle_latency);" << endl;
+ }
+
+
+ out << "}" << endl;
+
+ out << endl;
+
+ bool has_mandatory_q = false;
+ for(int i=0; i < m_in_ports.size(); i++) {
+ if (m_in_ports[i]->getCode().find("mandatoryQueue_ptr")!= string::npos)
+ has_mandatory_q = true;
+ }
+
+ out << "int " << component << "_Controller::getNumControllers() {" << endl;
+ out << " return m_num_controllers;" << endl;
out << "}" << endl;
out << endl;
+ out << "MessageBuffer* " << component << "_Controller::getMandatoryQueue() const {" << endl;
+ if (has_mandatory_q)
+ out << " return m_" << component << "_mandatoryQueue_ptr;" << endl;
+ else
+ out << " return NULL;" << endl;
+ out << "}" << endl;
+
+ out << endl;
+
+ out << "const int & "<<component<<"_Controller::getVersion() const{" << endl;
+ out << " return m_version;" << endl;
+ out << "}";
+
+ out << endl;
+
+ out << "const string "<<component<<"_Controller::toString() const{" << endl;
+ out << " return \"" << component<< "_Controller\";" << endl;
+ out << "}";
+
+ out << endl;
+
+ out << "const string "<<component<<"_Controller::getName() const{" << endl;
+ out << " return m_name;" << endl;
+ out << "}";
+
+ out << endl;
+
+ out << "const MachineType "<<component<<"_Controller::getMachineType() const{" << endl;
+ out << " return MachineType_" << component<< ";" << endl;
+ out << "}";
+
+ out << endl;
+
out << "void " << component << "_Controller::print(ostream& out) const { out << \"[" << component
- << "_Controller \" << m_chip_ptr->getID() << \" \" << m_version << \"]\"; }" << endl;
+ << "_Controller \" << m_version << \"]\"; }" << endl;
+
+ out << "void " << component << "_Controller::printConfig(ostream& out) const {" << endl;
+ out << " out << \"" << component << "_Controller config: \" << m_name << endl;" << endl;
+ out << " out << \" version: \" << m_version << endl;" << endl;
+ out << " for(map< string, string >::const_iterator it = m_cfg.begin(); it != m_cfg.end(); it++) {" << endl;
+ out << " out << \" \" << (*it).first << \": \" << (*it).second << endl;" << endl;
+ out << " }" << endl;
+ out << "}" << endl;
out << endl;
out << "// Actions" << endl;
@@ -387,14 +714,39 @@ void StateMachine::printControllerC(ostream& out, string component) const
<< action.getIdent() << "(const Address& addr)" << endl;
out << "{" << endl;
out << " DEBUG_MSG(GENERATED_COMP, HighPrio,\"executing\");" << endl;
- out << action.lookupPair("c_code");
+//added by SS
+//instead of rubyconfig:: --> it should point to m_latency...
+//so I should change the string output of this lookup
+
+ string c_code_string = action.lookupPair("c_code");
+
+ size_t found = c_code_string.find("RubyConfig::get");
+
+ if (found!=string::npos){ //found --> replace it with local access
+ //if it is related to latency --> replace it
+ std::vector<std::string*>::const_iterator it;
+ for(it=m_latency_vector.begin();it!=m_latency_vector.end();it++){
+ string str = (*it)->c_str();
+ str.erase(0,8);
+ size_t fd = c_code_string.find(str, found);
+ if (fd!=string::npos && (fd == found+15)){
+ string rstr = "m_";
+ rstr += (*it)->c_str();
+ c_code_string.replace(found,15+str.size()+2,rstr);
+ break;
+ }
+ }
+ }
+
+ out << c_code_string;
+
out << "}" << endl;
}
out << endl;
}
}
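A hedged before/after sketch of the action-code rewrite above, assuming a machine that declares
the latency parameter LATENCY_ISSUE_LATENCY (the parameter name is illustrative):

    // SLICC action body as stored in the "c_code" pair:
    //   ... latency = RubyConfig::getISSUE_LATENCY(); ...
    // text emitted into the generated _Controller.cc after the replacement:
    //   ... latency = m_LATENCY_ISSUE_LATENCY; ...

The member m_LATENCY_ISSUE_LATENCY is the per-controller field declared by printControllerH()
and filled in from the "issue_latency" entry of the init() argument list.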
-void StateMachine::printCWakeup(ostream& out, string component) const
+void StateMachine::printCWakeup(ostream& out, string component)
{
out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
out << "// " << getIdent() << ": " << getShorthand() << endl;
@@ -406,7 +758,6 @@ void StateMachine::printCWakeup(ostream& out, string component) const
out << "#include \"mem/protocol/" << component << "_Event.hh\"" << endl;
out << "#include \"mem/protocol/Types.hh\"" << endl;
out << "#include \"mem/ruby/system/System.hh\"" << endl;
- out << "#include \"mem/protocol/Chip.hh\"" << endl;
out << endl;
out << "void " << component << "_Controller::wakeup()" << endl;
out << "{" << endl;
@@ -416,8 +767,8 @@ void StateMachine::printCWakeup(ostream& out, string component) const
out << "int counter = 0;" << endl;
out << " while (true) {" << endl;
out << " // Some cases will put us into an infinite loop without this limit" << endl;
- out << " assert(counter <= RubyConfig::" << getIdent() << "TransitionsPerCycle());" << endl;
- out << " if (counter == RubyConfig::" << getIdent() << "TransitionsPerCycle()) {" << endl;
+ out << " assert(counter <= m_transitions_per_cycle);" << endl;
+ out << " if (counter == m_transitions_per_cycle) {" << endl;
out << " g_system_ptr->getProfiler()->controllerBusy(m_machineID); // Count how often we're fully utilized" << endl;
out << " g_eventQueue_ptr->scheduleEvent(this, 1); // Wakeup in another cycle and try again" << endl;
out << " break;" << endl;
@@ -442,7 +793,7 @@ void StateMachine::printCWakeup(ostream& out, string component) const
out << endl;
}
-void StateMachine::printCSwitch(ostream& out, string component) const
+void StateMachine::printCSwitch(ostream& out, string component)
{
out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
out << "// " << getIdent() << ": " << getShorthand() << endl;
@@ -453,7 +804,6 @@ void StateMachine::printCSwitch(ostream& out, string component) const
out << "#include \"mem/protocol/" << component << "_Event.hh\"" << endl;
out << "#include \"mem/protocol/Types.hh\"" << endl;
out << "#include \"mem/ruby/system/System.hh\"" << endl;
- out << "#include \"mem/protocol/Chip.hh\"" << endl;
out << endl;
out << "#define HASH_FUN(state, event) ((int(state)*" << component
<< "_Event_NUM)+int(event))" << endl;
@@ -490,9 +840,9 @@ void StateMachine::printCSwitch(ostream& out, string component) const
out << " DEBUG_EXPR(GENERATED_COMP, MedPrio, next_state);" << endl;
out << " DEBUG_NEWLINE(GENERATED_COMP, MedPrio);" << endl;
out << " s_profiler.countTransition(state, event);" << endl;
- out << " if (PROTOCOL_DEBUG_TRACE) {" << endl
+ out << " if (Debug::getProtocolTrace()) {" << endl
<< " g_system_ptr->getProfiler()->profileTransition(\"" << component
- << "\", m_chip_ptr->getID(), m_version, addr, " << endl
+ << "\", m_version, addr, " << endl
<< " " << component << "_State_to_string(state), " << endl
<< " " << component << "_Event_to_string(event), " << endl
<< " " << component << "_State_to_string(next_state), GET_TRANSITION_COMMENT());" << endl
@@ -501,9 +851,9 @@ void StateMachine::printCSwitch(ostream& out, string component) const
out << " " << component << "_setState(addr, next_state);" << endl;
out << " " << endl;
out << " } else if (result == TransitionResult_ResourceStall) {" << endl;
- out << " if (PROTOCOL_DEBUG_TRACE) {" << endl
+ out << " if (Debug::getProtocolTrace()) {" << endl
<< " g_system_ptr->getProfiler()->profileTransition(\"" << component
- << "\", m_chip_ptr->getID(), m_version, addr, " << endl
+ << "\", m_version, addr, " << endl
<< " " << component << "_State_to_string(state), " << endl
<< " " << component << "_Event_to_string(event), " << endl
<< " " << component << "_State_to_string(next_state), " << endl
@@ -512,9 +862,9 @@ void StateMachine::printCSwitch(ostream& out, string component) const
out << " } else if (result == TransitionResult_ProtocolStall) {" << endl;
out << " DEBUG_MSG(GENERATED_COMP,HighPrio,\"stalling\");" << endl
<< " DEBUG_NEWLINE(GENERATED_COMP, MedPrio);" << endl;
- out << " if (PROTOCOL_DEBUG_TRACE) {" << endl
+ out << " if (Debug::getProtocolTrace()) {" << endl
<< " g_system_ptr->getProfiler()->profileTransition(\"" << component
- << "\", m_chip_ptr->getID(), m_version, addr, " << endl
+ << "\", m_version, addr, " << endl
<< " " << component << "_State_to_string(state), " << endl
<< " " << component << "_Event_to_string(event), " << endl
<< " " << component << "_State_to_string(next_state), " << endl
@@ -630,7 +980,6 @@ void StateMachine::printCSwitch(ostream& out, string component) const
}
out << " default:" << endl;
- out << " WARN_EXPR(m_id);" << endl;
out << " WARN_EXPR(m_version);" << endl;
out << " WARN_EXPR(g_eventQueue_ptr->getTime());" << endl;
out << " WARN_EXPR(addr);" << endl;
@@ -642,7 +991,7 @@ void StateMachine::printCSwitch(ostream& out, string component) const
out << "}" << endl;
}
-void StateMachine::printProfilerH(ostream& out, string component) const
+void StateMachine::printProfilerH(ostream& out, string component)
{
out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
out << "// " << getIdent() << ": " << getShorthand() << endl;
@@ -669,7 +1018,7 @@ void StateMachine::printProfilerH(ostream& out, string component) const
out << "#endif // " << component << "_PROFILER_H" << endl;
}
-void StateMachine::printProfilerC(ostream& out, string component) const
+void StateMachine::printProfilerC(ostream& out, string component)
{
out << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< endl;
out << "// " << getIdent() << ": " << getShorthand() << endl;
@@ -769,7 +1118,7 @@ string frameRef(string href, string target, string target_num, string text)
}
-void StateMachine::writeHTMLFiles(string path) const
+void StateMachine::writeHTMLFiles(string path)
{
string filename;
string component = getIdent();
@@ -841,7 +1190,7 @@ void StateMachine::writeHTMLFiles(string path) const
}
}
-void StateMachine::printHTMLTransitions(ostream& out, int active_state) const
+void StateMachine::printHTMLTransitions(ostream& out, int active_state)
{
// -- Prolog
out << "<HTML><BODY link=\"blue\" vlink=\"blue\">" << endl;
diff --git a/src/mem/slicc/symbols/StateMachine.hh b/src/mem/slicc/symbols/StateMachine.hh
index 02ab12881..101e38547 100644
--- a/src/mem/slicc/symbols/StateMachine.hh
+++ b/src/mem/slicc/symbols/StateMachine.hh
@@ -39,6 +39,9 @@
#include "mem/gems_common/Vector.hh"
#include "mem/gems_common/Map.hh"
#include "mem/slicc/symbols/Symbol.hh"
+#include <list>
+
+using namespace std;
class Transition;
class Event;
@@ -50,7 +53,7 @@ class Func;
class StateMachine : public Symbol {
public:
// Constructors
- StateMachine(string ident, const Location& location, const Map<string, string>& pairs);
+ StateMachine(string ident, const Location& location, const Map<string, string>& pairs, std::vector<std::string*>* latency_vector);
// Destructor
~StateMachine();
@@ -65,6 +68,7 @@ public:
void addTransition(Transition* trans_ptr);
void addInPort(Var* var) { m_in_ports.insertAtBottom(var); }
void addFunc(Func* func);
+ void addObj(Var* obj) { m_objs.insertAtBottom(obj); }
// Accessors to vectors
const State& getState(int index) const { return *m_states[index]; }
@@ -72,21 +76,26 @@ public:
const Action& getAction(int index) const { return *m_actions[index]; }
const Transition& getTransition(int index) const { return *m_transitions[index]; }
const Transition* getTransPtr(int stateIndex, int eventIndex) const;
+ const Var& getObject(int index) const { return *m_objs[index]; }
// Accessors for size of vectors
int numStates() const { return m_states.size(); }
int numEvents() const { return m_events.size(); }
int numActions() const { return m_actions.size(); }
int numTransitions() const { return m_transitions.size(); }
+ int numObjects() const { return m_objs.size(); }
void buildTable(); // Needs to be called before accessing the table
// Code generator methods
- void writeCFiles(string path) const;
- void writeHTMLFiles(string path) const;
+ void writeCFiles(string path) ;
+ void writeHTMLFiles(string path) ;
void print(ostream& out) const { out << "[StateMachine: " << toString() << "]" << endl; }
private:
+
+ std::vector<std::string*> m_latency_vector;
+
// Private Methods
void checkForDuplicate(const Symbol& sym) const;
@@ -97,14 +106,14 @@ private:
// StateMachine(const StateMachine& obj);
// StateMachine& operator=(const StateMachine& obj);
- void printControllerH(ostream& out, string component) const;
- void printControllerC(ostream& out, string component) const;
- void printCWakeup(ostream& out, string component) const;
- void printCSwitch(ostream& out, string component) const;
- void printProfilerH(ostream& out, string component) const;
- void printProfilerC(ostream& out, string component) const;
+ void printControllerH(ostream& out, string component) ;
+ void printControllerC(ostream& out, string component) ;
+ void printCWakeup(ostream& out, string component) ;
+ void printCSwitch(ostream& out, string component) ;
+ void printProfilerH(ostream& out, string component) ;
+ void printProfilerC(ostream& out, string component) ;
- void printHTMLTransitions(ostream& out, int active_state) const;
+ void printHTMLTransitions(ostream& out, int active_state) ;
// Data Members (m_ prefix)
Vector<State*> m_states;
@@ -118,10 +127,15 @@ private:
Vector<Var*> m_in_ports;
+ Vector<Var*> m_objs;
+
// Table variables
bool m_table_built;
Vector<Vector<Transition*> > m_table;
+ //added by SS
+ std::vector<std::string> m_message_buffer_names;
+
};
// Output operator declaration
diff --git a/src/mem/slicc/symbols/Symbol.hh b/src/mem/slicc/symbols/Symbol.hh
index 1b4bd517a..4a1c5e44e 100644
--- a/src/mem/slicc/symbols/Symbol.hh
+++ b/src/mem/slicc/symbols/Symbol.hh
@@ -65,8 +65,8 @@ public:
void addPair(const string& key, const string& value);
// virtual string getCode() const = 0;
- virtual void writeCFiles(string path) const {}
- virtual void writeHTMLFiles(string path) const {}
+ virtual void writeCFiles(string path) {}
+ virtual void writeHTMLFiles(string path) {}
virtual void print(ostream& out) const { out << "[Symbol: " << getIdent() << "]"; }
private:
diff --git a/src/mem/slicc/symbols/SymbolTable.cc b/src/mem/slicc/symbols/SymbolTable.cc
index e598ffcb4..8af3685f8 100644
--- a/src/mem/slicc/symbols/SymbolTable.cc
+++ b/src/mem/slicc/symbols/SymbolTable.cc
@@ -27,6 +27,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * SymbolTable.cc
+ *
+ * Description: See SymbolTable.hh
+ *
+ * $Id$
+ *
+ * */
+
#include "mem/slicc/symbols/SymbolTable.hh"
#include "mem/slicc/generator/fileio.hh"
#include "mem/slicc/generator/html_gen.hh"
@@ -163,7 +172,7 @@ void SymbolTable::writeCFiles(string path) const
{
int size = m_sym_vec.size();
{
- // Write the mem/protocol/Types.hh include file for the types
+ // Write the Types.hh include file for the types
ostringstream sstr;
sstr << "/** Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<< " */" << endl;
sstr << endl;
@@ -182,685 +191,69 @@ void SymbolTable::writeCFiles(string path) const
m_sym_vec[i]->writeCFiles(path + '/');
}
- writeChipFiles(path);
+ writeControllerFactory(path);
}
-void SymbolTable::writeChipFiles(string path) const
+void SymbolTable::writeControllerFactory(string path) const
{
- // Create Chip.cc and mem/protocol/Chip.hh
-
- // FIXME - Note: this method is _really_ ugly. Most of this
- // functionality should be pushed into each type of symbol and use
- // virtual methods to get the right behavior for each type of
- // symbol. This is also more flexible, and much cleaner.
-
+ ostringstream sstr;
int size = m_sym_vec.size();
- // Create Chip.h
- {
- ostringstream sstr;
- sstr << "/** \\file Chip.h " << endl;
- sstr << " * Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<<endl;
- sstr << " */ " <<endl<<endl;
-
- sstr << "#ifndef CHIP_H" << endl;
- sstr << "#define CHIP_H" << endl;
- sstr << endl;
+ sstr << "/** \\file ControllerFactory.hh " << endl;
+ sstr << " * Auto generatred C++ code started by " << __FILE__ << ":" << __LINE__ << endl;
+ sstr << " */" << endl << endl;
- // Includes
- sstr << "#include \"mem/ruby/common/Global.hh\"" << endl;
- sstr << "#include \"mem/protocol/Types.hh\"" << endl;
- sstr << "#include \"mem/ruby/slicc_interface/AbstractChip.hh\"" << endl;
- sstr << "class Network;" << endl;
- sstr << endl;
-
- // Class declarations for all Machines/Controllers
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- sstr << "class " << machine->getIdent() << "_Controller;" << endl;
- }
- }
+ sstr << "#ifndef CONTROLLERFACTORY_H" << endl;
+ sstr << "#define CONTROLLERFACTORY_H" << endl;
+ sstr << endl;
- sstr << "class Chip : public AbstractChip {" << endl;
- sstr << "public:" << endl;
- sstr << endl;
- sstr << " // Constructors" << endl;
- sstr << " Chip(NodeID chip_number, Network* net_ptr);" << endl;
- sstr << endl;
- sstr << " // Destructor" << endl;
- sstr << " ~Chip();" << endl;
- sstr << endl;
- sstr << " // Public Methods" << endl;
- sstr << " void recordCacheContents(CacheRecorder& tr) const;" << endl;
- sstr << " void dumpCaches(ostream& out) const;" << endl;
- sstr << " void dumpCacheData(ostream& out) const;" << endl;
- sstr << " static void printStats(ostream& out);" << endl;
- sstr << " static void clearStats();" << endl;
- sstr << " void printConfig(ostream& out);" << endl;
- sstr << " void print(ostream& out) const;" << endl;
-
- // Used by coherence checker
- sstr << "#ifdef CHECK_COHERENCE" << endl;
- sstr << " bool isBlockShared(const Address& addr) const;" << endl;
- sstr << " bool isBlockExclusive(const Address& addr) const;" << endl;
- sstr << "#endif /* CHECK_COHERENCE */" << endl;
+ Vector< string > controller_types;
- sstr << endl;
- sstr << "private:" << endl;
- sstr << " // Private copy constructor and assignment operator" << endl;
- sstr << " Chip(const Chip& obj);" << endl;
- sstr << " Chip& operator=(const Chip& obj);" << endl;
- sstr << endl;
- sstr << "public: // FIXME - these should not be public" << endl;
- sstr << " // Data Members (m_ prefix)" << endl;
- sstr << endl;
- sstr << " Chip* m_chip_ptr;" << endl;
- sstr << endl;
- sstr << " // SLICC object variables" << endl;
- sstr << endl;
+ // includes
+ sstr << "#include <string>" << endl;
+ sstr << "class Network;" << endl;
+ sstr << "class AbstractController;" << endl;
+ sstr << endl;
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL) {
- if (var->existPair("chip_object")) {
- if (var->existPair("no_chip_object")) {
- // Do nothing
- } else {
- string template_hack = "";
- if (var->existPair("template_hack")) {
- template_hack = var->lookupPair("template_hack");
- }
- if (// var->existPair("network") || var->getType()->existPair("cache") ||
-// var->getType()->existPair("tbe") || var->getType()->existPair("newtbe") ||
-// var->getType()->existPair("dir") || var->getType()->existPair("persistent") ||
-// var->getType()->existPair("filter") || var->getType()->existPair("timer") ||
-// var->existPair("trigger_queue")
- var->existPair("no_vector")
- ) {
- sstr << " " << var->getType()->cIdent() << template_hack << "* m_"
- << var->cIdent() << "_ptr;" << endl;
- } else {
- // create pointer except those created in AbstractChip
- if (!(var->existPair("abstract_chip_ptr"))) {
- sstr << " Vector < " << var->getType()->cIdent() << template_hack
- << "* > m_" << var->cIdent() << "_vec;" << endl;
- }
- }
- }
- }
- }
- }
+ sstr << "class ControllerFactory {" << endl;
+ sstr << "public:" << endl;
+ sstr << " static AbstractController* createController(const std::string & controller_type, const std::string & name);" << endl;
+ sstr << "};" << endl;
+ sstr << endl;
- sstr << endl;
- sstr << " // SLICC machine/controller variables" << endl;
+ sstr << "#endif // CONTROLLERFACTORY_H" << endl;
+ conditionally_write_file(path + "/ControllerFactory.hh", sstr);
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " Vector < " << ident << "* > m_" << ident << "_vec;\n";
- }
- }
+ // ControllerFactory.cc file
- sstr << endl;
+ sstr.str("");
- sstr << " // machine external SLICC function decls\n";
+ sstr << "/** \\file ControllerFactory.cc " << endl;
+ sstr << " * Auto generatred C++ code started by " << __FILE__ << ":" << __LINE__ << endl;
+ sstr << " */" << endl << endl;
- // Look at all 'Functions'
- for(int i=0; i<size; i++) {
- Func* func = dynamic_cast<Func*>(m_sym_vec[i]);
- if (func != NULL) {
- string proto;
- func->funcPrototype(proto);
- if (proto != "") {
- sstr << " " << proto;
- }
- }
+ // includes
+ sstr << "#include \"mem/protocol/ControllerFactory.hh\"" << endl;
+ sstr << "#include \"mem/ruby/slicc_interface/AbstractController.hh\"" << endl;
+ sstr << "#include \"mem/protocol/MachineType.hh\"" << endl;
+ for(int i=0; i<size; i++) {
+ StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
+ if (machine != NULL) {
+ sstr << "#include \"mem/protocol/" << machine->getIdent() << "_Controller.hh\"" << endl;
+ controller_types.insertAtBottom(machine->getIdent());
}
-
- sstr << "};" << endl;
- sstr << endl;
- sstr << "#endif // CHIP_H" << endl;
-
- conditionally_write_file(path + "/Chip.hh", sstr);
}
- // Create Chip.cc
- {
- ostringstream sstr;
- sstr << "// Auto generated C++ code started by "<<__FILE__<<":"<<__LINE__<<endl<<endl;
- sstr << "#include \"mem/protocol/Chip.hh\"" << endl;
- sstr << "#include \"mem/ruby/network/Network.hh\"" << endl;
- sstr << "#include \"mem/ruby/recorder/CacheRecorder.hh\"" << endl;
- sstr << "" << endl;
-
- sstr << "// Includes for controllers" << endl;
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- sstr << "#include \"mem/protocol/" << machine->getIdent() << "_Controller.hh\"" << endl;
- }
- }
-
- sstr << "" << endl;
- sstr << "Chip::Chip(NodeID id, Network* net_ptr):AbstractChip(id, net_ptr)" << endl;
- sstr << "{" << endl;
- sstr << " m_chip_ptr = this;" << endl;
-
- // FIXME - WHY IS THIS LOOP HERE?
- // WE SEEM TO BE CREATING A SEQUENCER HERE THEN OVERWRITTING THAT INSTANITATION
- // IN THE NEXT LOOP
-// // find sequencer's type
-// for(int i=0; i<size; i++) {
-// Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
-// if(var && var->cIdent() == "sequencer")
-// sstr << " m_sequencer_ptr = new " << var->getType()->cIdent() << "(this);\n";
-// }
-
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL && var->existPair("chip_object") && !var->existPair("no_chip_object")) {
-
- sstr << " // " << var->cIdent() << endl;
- if (!var->existPair("network")) {
- // Not a network port object
- if (var->getType()->existPair("primitive")) {
- // Normal non-object
- // sstr << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << ";\n";
-
- sstr << " m_" << var->cIdent();
- sstr << "_vec.setSize(RubyConfig::numberOf";
- sstr << var->getMachine()->getIdent() << "PerChip(m_id));" << endl;
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i] = new " << var->getType()->cIdent() << ";\n";
- if (var->existPair("default")) {
- sstr << " *(m_" << var->cIdent() << "_vec[i]) = " << var->lookupPair("default") << ";\n";
- }
- sstr << " }\n";
-
- } else {
-
- // Normal Object
- string template_hack = "";
- if (var->existPair("template_hack")) {
- template_hack = var->lookupPair("template_hack");
- }
- if (// var->getType()->existPair("cache") || var->getType()->existPair("tbe") ||
-// var->getType()->existPair("newtbe") || var->getType()->existPair("timer") ||
-// var->getType()->existPair("dir") || var->getType()->existPair("persistent") ||
-// var->getType()->existPair("filter") || var->existPair("trigger_queue")
- var->existPair("no_vector")) {
- sstr << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << template_hack;
- if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
- if (var->existPair("constructor_hack")) {
- string constructor_hack = var->lookupPair("constructor_hack");
- sstr << "(this, " << constructor_hack << ")";
- } else {
- sstr << "(this)";
- }
- }
- sstr << ";\n";
- sstr << " assert(m_" << var->cIdent() << "_ptr != NULL);" << endl;
-
- if (var->existPair("default")) {
- sstr << " (*m_" << var->cIdent() << "_ptr) = " << var->lookupPair("default")
- << "; // Object default" << endl;
- } else if (var->getType()->hasDefault()) {
- sstr << " (*m_" << var->cIdent() << "_ptr) = " << var->getType()->getDefault()
- << "; // Type " << var->getType()->getIdent() << " default" << endl;
- }
-
- // Set ordering
- if (var->existPair("ordered") && !var->existPair("trigger_queue")) {
- // A buffer
- string ordered = var->lookupPair("ordered");
- sstr << " m_" << var->cIdent() << "_ptr->setOrdering(" << ordered << ");\n";
- }
-
- // Set randomization
- if (var->existPair("random")) {
- // A buffer
- string value = var->lookupPair("random");
- sstr << " m_" << var->cIdent() << "_ptr->setRandomization(" << value << ");\n";
- }
-
- // Set Priority
- if (var->getType()->isBuffer() && var->existPair("rank") && !var->existPair("trigger_queue")) {
- string rank = var->lookupPair("rank");
- sstr << " m_" << var->cIdent() << "_ptr->setPriority(" << rank << ");\n";
- }
- } else if ((var->getType()->existPair("mover")) && (var->getMachine()->getIdent() == "L2Cache")) {
- // FIXME - dnuca mover is a special case
- sstr << " m_" << var->cIdent() << "_ptr = NULL;" << endl;
- sstr << " if (RubyConfig::isL2CacheDNUCAMoverChip(m_id)) {" << endl;
- sstr << " m_" << var->cIdent() << "_ptr = new " << var->getType()->cIdent() << template_hack;
- if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
- if (var->existPair("constructor_hack")) {
- string constructor_hack = var->lookupPair("constructor_hack");
- sstr << "(this, " << constructor_hack << ")";
- } else {
- sstr << "(this)";
- }
- }
- sstr << ";\n";
- sstr << " }\n";
- } else if (var->getType()->existPair("mover") && ((var->getMachine()->getIdent() == "L1Cache") || (var->getMachine()->getIdent() == "Collector"))) {
- sstr << " m_" << var->cIdent() << "_ptr = NULL;" << endl;
- sstr << " \n";
- } else {
- sstr << " m_" << var->cIdent();
- sstr << "_vec.setSize(RubyConfig::numberOf";
- sstr << var->getMachine()->getIdent() << "PerChip(m_id));" << endl;
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
-
-
- ostringstream tail;
- tail << template_hack;
- if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
- if (var->existPair("constructor_hack")) {
- string constructor_hack = var->lookupPair("constructor_hack");
- tail << "(this, " << constructor_hack << ")";
- } else {
- tail << "(this)";
- }
- }
- tail << ";\n";
-
-
- if(var->existPair("child_selector")){
- string child_selector = var->lookupPair("child_selector");
- string child_types = var->lookupPair("child_types");
- string::iterator it = child_types.begin();
-
- unsigned num_types = 0;
- for(unsigned t=0;t<child_types.size();t++){
- if(child_types.at(t) == '<'){
- num_types++;
- }
- }
-
- string* types = new string[num_types];
- string* ids = new string[num_types];
- int type_idx = 0;
- bool id_done = false;
- for(unsigned t=0;t<child_types.size();t++){
- if(child_types[t] == '<'){
- id_done = false;
- unsigned r;
- for(r=t+1;child_types.at(r)!='>';r++){
- if(r == child_types.size()){
- cerr << "Parse error in child_types" << endl;
- exit(EXIT_FAILURE);
- }
- if(child_types.at(r) == ' ') continue; //ignore whitespace
- if(child_types.at(r) == ',') {id_done = true;continue;}
- if(id_done == true)
- types[type_idx].push_back(child_types.at(r));
- else
- ids[type_idx].push_back(child_types.at(r));
- }
- type_idx++;
- t = r;
- }
- }
-
- for(unsigned t=0;t<num_types;t++){
- if(t==0)
- sstr << " if(strcmp(" << child_selector << ", \"" << ids[t] << "\") == 0)" << endl;
- else
- sstr << " else if(strcmp(" << child_selector << ", \"" << ids[t] << "\") == 0)" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i] = new " << types[t] << tail.str() << endl;
- }
- }
- else {
- sstr << " m_" << var->cIdent() << "_vec[i] = new " << var->getType()->cIdent() << tail.str() << endl;
- }
-
- sstr << " assert(m_" << var->cIdent() << "_vec[i] != NULL);" << endl;
- if (var->existPair("ordered")) {
- string ordered = var->lookupPair("ordered");
- sstr << " m_" << var->cIdent() << "_vec[i]->setOrdering(" << ordered << ");\n";
- }
- if (var->existPair("rank")) {
- string rank = var->lookupPair("rank");
- sstr << " m_" << var->cIdent() << "_vec[i]->setPriority(" << rank << ");\n";
- }
-
- // Set buffer size
- if (var->getType()->isBuffer() && !var->existPair("infinite")) {
- sstr << " if (FINITE_BUFFERING) {\n";
- sstr << " m_" << var->cIdent() << "_vec[i]->setSize(PROCESSOR_BUFFER_SIZE);\n";
- sstr << " }\n";
- }
-
- sstr << " }\n";
- }
- }
-
- sstr << endl;
-
- } else {
- // Network port object
- string network = var->lookupPair("network");
- string ordered = var->lookupPair("ordered");
- string vnet = var->lookupPair("virtual_network");
-
- if (var->getMachine() != NULL) {
- sstr << " m_" << var->cIdent() << "_vec.setSize(RubyConfig::numberOf"
- << var->getMachine()->getIdent() << "PerChip(m_id));" << endl;
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i] = m_net_ptr->get"
- << network << "NetQueue(i+m_id*RubyConfig::numberOf" <<var->getMachine()->getIdent()
- << "PerChip()+MachineType_base_number(string_to_MachineType(\""
- << var->getMachine()->getIdent() << "\")), "
- << ordered << ", " << vnet << ");\n";
- sstr << " assert(m_" << var->cIdent() << "_vec[i] != NULL);" << endl;
- } else { // old protocol
- sstr << " m_" << var->cIdent() << "_vec.setSize(1);" << endl;
- sstr << " for (int i = 0; i < 1; i++) {" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i] = m_net_ptr->get"
- << network << "NetQueue(m_id, "
- << ordered << ", " << vnet << ");\n";
- sstr << " assert(m_" << var->cIdent() << "_vec[i] != NULL);" << endl;
- }
-
- // Set ordering
- if (var->existPair("ordered")) {
- // A buffer
- string ordered = var->lookupPair("ordered");
- sstr << " m_" << var->cIdent() << "_vec[i]->setOrdering(" << ordered << ");\n";
- }
-
- // Set randomization
- if (var->existPair("random")) {
- // A buffer
- string value = var->lookupPair("random");
- sstr << " m_" << var->cIdent() << "_vec[i]->setRandomization(" << value << ");\n";
- }
-
- // Set Priority
- if (var->existPair("rank")) {
- string rank = var->lookupPair("rank");
- sstr << " m_" << var->cIdent() << "_vec[i]->setPriority(" << rank << ");\n";
- }
-
- // Set buffer size
- if (var->getType()->isBuffer()) {
- sstr << " if (FINITE_BUFFERING) {\n";
- sstr << " m_" << var->cIdent() << "_vec[i]->setSize(PROTOCOL_BUFFER_SIZE);\n";
- sstr << " }\n";
- }
-
- sstr << " }\n";
- }
- }
- }
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " m_" << ident << "_vec.setSize(RubyConfig::numberOf" << machine->getIdent()
- << "PerChip(m_id));" << endl;
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " m_" << ident << "_vec[i] = new " << ident << "(this, i);\n";
- sstr << " assert(m_" << ident << "_vec[i] != NULL);" << endl;
- sstr << " }\n";
- sstr << endl;
- }
- }
-
- sstr << "}" << endl;
- sstr << endl;
- sstr << "Chip::~Chip()\n";
- sstr << "{\n";
-
-// // FIXME: sequencer shouldn' be manually handled
-// sstr << " delete m_sequencer_ptr;" << endl;
-
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL) {
- if (var->existPair("chip_object")) {
- if (var->existPair("no_chip_object")) {
- // Do nothing
- } else {
- string template_hack = "";
- if (var->existPair("template_hack")) {
- template_hack = var->lookupPair("template_hack");
- }
- if (// var->getType()->existPair("cache") || var->getType()->existPair("tbe") ||
-// var->getType()->existPair("newtbe") || var->getType()->existPair("timer") ||
-// var->getType()->existPair("dir") || var->getType()->existPair("persistent") ||
-// var->getType()->existPair("filter") || var->existPair("trigger_queue")
- var->existPair("no_vector")) {
- sstr << " delete m_" << var->cIdent() << "_ptr;\n";
- } else if ((var->getType()->existPair("mover")) && (var->getMachine()->getIdent() == "L2Cache")) {
- sstr << " if (RubyConfig::isL2CacheDNUCAMoverChip(m_id)) {" << endl;
- sstr << " delete m_" << var->cIdent() << "_ptr;\n";
- sstr << " }\n";
- } else if (var->getType()->existPair("mover") && ((var->getMachine()->getIdent() == "L1Cache") || (var->getMachine()->getIdent() == "Collector"))) {
- sstr << " m_" << var->cIdent() << "_ptr = NULL;" << endl;
- } else if (!var->existPair("network")) {
- // Normal Object
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " delete m_" << var->cIdent() << "_vec[i];\n";
- sstr << " }\n";
- }
- }
- }
- }
- }
-
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " delete m_" << ident << "_vec[i];\n";
- sstr << " }\n";
- }
- }
- sstr << "}\n";
-
- sstr << "\n";
- sstr << "void Chip::clearStats()\n";
- sstr << "{\n";
-
-
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " " << ident << "::clearStats();\n";
- }
- }
-
- sstr << "}\n";
-
- sstr << "\n";
- sstr << "void Chip::printStats(ostream& out)\n";
- sstr << "{\n";
- sstr << " out << endl;\n";
- sstr << " out << \"Chip Stats\" << endl;\n";
- sstr << " out << \"----------\" << endl << endl;\n";
-
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " " << ident << "::dumpStats(out);\n";
- }
- }
-
- sstr << "}" << endl;
- sstr << endl;
- sstr << "void Chip::printConfig(ostream& out)\n";
- sstr << "{\n";
- sstr << " out << \"Chip Config\" << endl;\n";
- sstr << " out << \"-----------\" << endl;\n";
- sstr << " out << \"Total_Chips: \" << RubyConfig::numberOfChips() << endl;\n";
-
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL) {
- if (var->existPair("chip_object")) {
- if (var->existPair("no_chip_object")) {
- // Do nothing
- } else {
- string template_hack = "";
- if (var->existPair("template_hack")) {
- template_hack = var->lookupPair("template_hack");
- }
-
- if (!var->existPair("network") && (!var->getType()->existPair("primitive"))) {
- // Normal Object
- if (!var->getType()->existPair("non_obj") && (!var->getType()->isEnumeration())) {
- if (var->existPair("no_vector")) {
- sstr << " m_" << var->cIdent() << "_ptr->printConfig(out);\n";
- } else {
- sstr << " out << \"\\n" << var->cIdent() << " numberPerChip: \" << RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip() << endl;\n";
- sstr << " m_" << var->cIdent() << "_vec[0]->printConfig(out);\n";
-// sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
-// << "PerChip(m_id); i++) {" << endl;
-// sstr << " m_" << var->cIdent() << "_vec[i]->printConfig(out);\n";
-// sstr << " }\n";
- }
- }
- }
- }
- }
- }
- }
-
- sstr << " out << endl;\n";
- sstr << "}" << endl;
-
- sstr << endl;
- sstr << "void Chip::print(ostream& out) const\n";
- sstr << "{\n";
- sstr << " out << \"Ruby Chip\" << endl;\n";
- sstr << "}" << endl;
-
- sstr << "#ifdef CHECK_COHERENCE" << endl;
- sstr << endl;
- sstr << "bool Chip::isBlockShared(const Address& addr) const" << endl;
- sstr << "{" << endl;
-
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " if (m_" << ident << "_vec[i]->" << machine->getIdent() << "_isBlockShared(addr)) {\n";
- sstr << " return true; \n";
- sstr << " }\n";
- sstr << " }\n";
- }
- }
- sstr << " return false;" << endl;
- sstr << "}" << endl;
- sstr << endl;
-
- sstr << endl;
- sstr << "bool Chip::isBlockExclusive(const Address& addr) const" << endl;
- sstr << "{" << endl;
-
- // Look at all 'Machines'
- for(int i=0; i<size; i++) {
- StateMachine* machine = dynamic_cast<StateMachine*>(m_sym_vec[i]);
- if (machine != NULL) {
- string ident = machine->getIdent() + "_Controller";
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << machine->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " if (m_" << ident << "_vec[i]->" << machine->getIdent() << "_isBlockExclusive(addr)) {\n";
- sstr << " return true; \n";
- sstr << " }\n";
- sstr << " }\n";
- }
- }
-
- sstr << " return false;" << endl;
- sstr << "}" << endl;
- sstr << endl;
-
- sstr << "#endif /* CHECK_COHERENCE */ " << endl;
-
-
- sstr << endl;
- sstr << "void Chip::dumpCaches(ostream& out) const" << endl;
- sstr << "{" << endl;
-
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL) {
- if (var->getType()->existPair("cache")){ // caches are partitioned one per controller instaniation
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i]->print(out);\n";
- sstr << " }\n";
- }
- }
- }
- sstr << "}" << endl;
- sstr << endl;
-
- // Function to dump cache tag and data information
- sstr << "void Chip::dumpCacheData(ostream& out) const" << endl;
- sstr << "{" << endl;
-
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL) {
- if (var->getType()->existPair("cache")){ // caches are partitioned one per controller instaniation
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i]->printData(out);\n";
- sstr << " }\n";
- }
- }
- }
- sstr << "}" << endl;
- sstr << endl;
-
- sstr << "void Chip::recordCacheContents(CacheRecorder& tr) const" << endl;
- sstr << "{" << endl;
-
- // Look at all 'Vars'
- for(int i=0; i<size; i++) {
- Var* var = dynamic_cast<Var*>(m_sym_vec[i]);
- if (var != NULL) {
- if (var->getType()->existPair("cache")){ // caches are partitioned one per controller instaniation
- sstr << " for (int i = 0; i < RubyConfig::numberOf" << var->getMachine()->getIdent()
- << "PerChip(m_id); i++) {" << endl;
- sstr << " m_" << var->cIdent() << "_vec[i]->recordCacheContents(tr);\n";
- sstr << " }\n";
- }
- }
- }
- sstr << "}" << endl;
+ sstr << endl;
- conditionally_write_file(path + "/Chip.cc", sstr);
+ sstr << "AbstractController* ControllerFactory::createController(const std::string & controller_type, const std::string & name) {" << endl;
+ for (int i=0;i<controller_types.size();i++) {
+ sstr << " if (controller_type == \"" << controller_types[i] << "\")" << endl;
+ sstr << " return new " << controller_types[i] << "_Controller(name);" << endl;
}
+ sstr << " assert(0); // invalid controller type" << endl;
+ sstr << " return NULL;" << endl;
+ sstr << "}" << endl;
+ conditionally_write_file(path + "/ControllerFactory.cc", sstr);
}
Vector<StateMachine*> SymbolTable::getStateMachines() const
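A minimal sketch of what the new writeControllerFactory() emits, assuming a protocol with just two state machines named L1Cache and Directory (the machine names are illustrative, not taken from this patch):

    // ControllerFactory.cc (auto-generated)
    #include "mem/protocol/ControllerFactory.hh"
    #include "mem/ruby/slicc_interface/AbstractController.hh"
    #include "mem/protocol/MachineType.hh"
    #include "mem/protocol/L1Cache_Controller.hh"
    #include "mem/protocol/Directory_Controller.hh"

    AbstractController* ControllerFactory::createController(const std::string & controller_type, const std::string & name) {
        if (controller_type == "L1Cache")
            return new L1Cache_Controller(name);
        if (controller_type == "Directory")
            return new Directory_Controller(name);
        assert(0); // invalid controller type
        return NULL;
    }

The generated MachineType code below includes this header, so controllers can presumably be constructed by their machine-type name string rather than through the removed per-chip Chip object.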
diff --git a/src/mem/slicc/symbols/SymbolTable.hh b/src/mem/slicc/symbols/SymbolTable.hh
index afd3f9443..90d3f48c3 100644
--- a/src/mem/slicc/symbols/SymbolTable.hh
+++ b/src/mem/slicc/symbols/SymbolTable.hh
@@ -91,7 +91,7 @@ public:
private:
// Private Methods
void registerGlobalSym(string id, Symbol* sym_ptr);
- void writeChipFiles(string path) const;
+ void writeControllerFactory(string path) const;
// Private copy constructor and assignment operator
SymbolTable(const SymbolTable& obj);
diff --git a/src/mem/slicc/symbols/Type.cc b/src/mem/slicc/symbols/Type.cc
index 0a3a7a3be..75f72af02 100644
--- a/src/mem/slicc/symbols/Type.cc
+++ b/src/mem/slicc/symbols/Type.cc
@@ -176,7 +176,7 @@ bool Type::enumAdd(string id, Map<string, string> pairs_map)
}
}
-void Type::writeCFiles(string path) const
+void Type::writeCFiles(string path)
{
if (isExternal()) {
// Do nothing
@@ -298,6 +298,13 @@ void Type::printTypeH(string path) const
out << " }" << endl;
} // end of if(!isGlobal())
+ // create a static factory method
+ if (interface != "") {
+ out << " static " << interface << "* create() {" << endl;
+ out << " return new " << type_name << "(); " << endl;
+ out << " }" << endl;
+ }
+
// bobba -
//******** Partial init constructor ********
//** Constructor needs only the first n-1 data members for init
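Concretely, for a SLICC type declared with an interface, this hook emits a static factory into the generated header, roughly as follows (the type and interface names here are hypothetical):

    // in the generated PseudoLRUPolicy.hh, assuming the type was declared
    // with interface="AbstractReplacementPolicy"
    static AbstractReplacementPolicy* create() {
        return new PseudoLRUPolicy();
    }

presumably so generated factory-style code can construct the concrete type through its interface pointer without naming the concrete class.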
@@ -499,9 +506,6 @@ void Type::printEnumH(string path) const
out << endl;
// Include all of the #includes needed
out << "#include \"mem/ruby/common/Global.hh\"" << endl;
- if (m_isMachineType) {
- out << "#include \"mem/ruby/config/RubyConfig.hh\"" << endl << endl;
- }
out << endl;
// Class definition
@@ -543,9 +547,10 @@ void Type::printEnumH(string path) const
// MachineType hack used to set the base component id for each Machine
if (m_isMachineType) {
out << "int " << type_name << "_base_level(const " << type_name << "& obj);" << endl;
+ out << "MachineType " << type_name << "_from_base_level(int);" << endl;
out << "int " << type_name << "_base_number(const " << type_name << "& obj);" << endl;
out << "int " << type_name << "_base_count(const " << type_name << "& obj);" << endl;
- out << "int " << type_name << "_chip_count(const " << type_name << "& obj, NodeID chipID);" << endl;
+ // out << "int " << type_name << "_chip_count(const " << type_name << "& obj, int chipID);" << endl;
for(int i = 0; i < size; i++ ) {
string id = m_enum_vec[i];
@@ -577,6 +582,14 @@ void Type::printEnumC(string path) const
out << endl;
out << "#include \"mem/protocol/" << type_name << ".hh\"" << endl;
+ if (m_isMachineType) {
+ out << "#include \"mem/ruby/config/RubyConfig.hh\"" << endl;
+ out << "#include \"mem/protocol/ControllerFactory.hh\"" << endl;
+ for( int i = 0; i<size; i++ ) {
+ out << "#include \"mem/protocol/" << m_enum_vec[i] << "_Controller.hh\"" << endl;
+ }
+ out << endl;
+ }
out << endl;
// Code for output operator
@@ -637,7 +650,7 @@ void Type::printEnumC(string path) const
out << "/** \\brief returns the base vector index for each machine type to be used by NetDest " << endl;
out << " * " << endl;
out << " * \\return the base vector index for each machine type to be used by NetDest" << endl;
- out << " * \\see mem/ruby/common/NetDest.hh" << endl;
+ out << " * \\see NetDest.hh" << endl;
out << " */" << endl;
out << "int " << type_name << "_base_level(const " << type_name << "& obj)" << endl;
out << "{" << endl;
@@ -662,6 +675,30 @@ void Type::printEnumC(string path) const
out << " }" << endl;
out << "}" << endl;
+ out << "/** \\brief returns the machine type for each base vector index used by NetDest and RubyConfig" << endl;
+ out << " * " << endl;
+ out << " * \\return the MachineTYpe" << endl;
+ out << " */" << endl;
+ out << "MachineType " << type_name << "_from_base_level(int type)" << endl;
+ out << "{" << endl;
+ out << " switch(type) {" << endl;
+
+ // For each field
+ MachineNames.clear();
+ for( int i = 0; i<size; i++ ) {
+ out << " case " << MachineNames.size() << ":" << endl;
+ out << " return " << type_name << "_" << m_enum_vec[i] << ";" << endl;
+ MachineNames.insertAtBottom(m_enum_vec[i]);
+ }
+
+ // Trailer
+ out << " default:" << endl;
+ out << " ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+ out << " return MachineType_NUM;" << endl;
+ out << " }" << endl;
+ out << "}" << endl;
+
+
out << endl;
out << "/** \\brief The return value indicates the number of components created" << endl;
out << " * before a particular machine's components" << endl;
@@ -678,7 +715,7 @@ void Type::printEnumC(string path) const
out << " case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
out << " return 0";
for ( int m = 0; m<MachineNames.size(); m++) {
- out << "+RubyConfig::numberOf" << MachineNames[m] << "()";
+ out << "+ " << MachineNames[m] << "_Controller::getNumControllers()";
}
out << ";" << endl;
MachineNames.insertAtBottom(m_enum_vec[i]);
@@ -688,7 +725,7 @@ void Type::printEnumC(string path) const
out << " case " << type_name << "_NUM:" << endl;
out << " return 0";
for ( int m = 0; m<MachineNames.size(); m++) {
- out << "+RubyConfig::numberOf" << MachineNames[m] << "()";
+ out << "+ " << MachineNames[m] << "_Controller::getNumControllers()";
}
out << ";" << endl;
@@ -711,7 +748,7 @@ void Type::printEnumC(string path) const
// For each field
for( int i = 0; i<size; i++ ) {
out << " case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
- out << " return RubyConfig::numberOf" << m_enum_vec[i] << "();" << endl;
+ out << " return " << m_enum_vec[i] << "_Controller::getNumControllers();" << endl;
}
// total num
@@ -724,27 +761,28 @@ void Type::printEnumC(string path) const
out << "}" << endl;
out << endl;
- out << "/** \\brief returns the total number of components for each machine" << endl;
- out << " * \\return the total number of components for each machine" << endl;
- out << " */" << endl;
- out << "int " << type_name << "_chip_count(const " << type_name << "& obj, NodeID chipID)" << endl;
- out << "{" << endl;
- out << " switch(obj) {" << endl;
- // For each field
- for( int i = 0; i<size; i++ ) {
- out << " case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
- out << " return RubyConfig::numberOf" << m_enum_vec[i] << "PerChip(chipID);" << endl;
- }
+// out << "/** \\brief returns the total number of components for each machine" << endl;
+// out << " * \\return the total number of components for each machine" << endl;
+// out << " */" << endl;
+// out << "int " << type_name << "_chip_count(const " << type_name << "& obj, int chip_id)" << endl;
+// out << "{" << endl;
+// out << " switch(obj) {" << endl;
+
+// // For each field
+// for( int i = 0; i<size; i++ ) {
+// out << " case " << type_name << "_" << m_enum_vec[i] << ":" << endl;
+// out << " return RubyConfig::getNumberOfControllersPerTypePerChip(MachineType_base_level(MachineType_" << m_enum_vec[i] << "), chip_id);" << endl;
+// }
- // total num
- out << " case " << type_name << "_NUM:" << endl;
- // Trailer
- out << " default:" << endl;
- out << " ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
- out << " return -1;" << endl;
- out << " }" << endl;
- out << "}" << endl;
+// // total num
+// out << " case " << type_name << "_NUM:" << endl;
+// // Trailer
+// out << " default:" << endl;
+// out << " ERROR_MSG(\"Invalid range for type " << type_name << "\");" << endl;
+// out << " return -1;" << endl;
+// out << " }" << endl;
+// out << "}" << endl;
}
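For the same illustrative two-machine protocol (L1Cache and Directory; names assumed), the new MachineType_from_base_level() produced by printEnumC() expands to roughly:

    MachineType MachineType_from_base_level(int type)
    {
      switch(type) {
      case 0:
        return MachineType_L1Cache;
      case 1:
        return MachineType_Directory;
      default:
        ERROR_MSG("Invalid range for type MachineType");
        return MachineType_NUM;
      }
    }

and MachineType_base_number()/MachineType_base_count() now sum <Machine>_Controller::getNumControllers() instead of the old RubyConfig::numberOf<Machine>() per-chip counts, consistent with dropping the chip-based configuration.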
diff --git a/src/mem/slicc/symbols/Type.hh b/src/mem/slicc/symbols/Type.hh
index cd09c066f..07d661d3c 100644
--- a/src/mem/slicc/symbols/Type.hh
+++ b/src/mem/slicc/symbols/Type.hh
@@ -68,6 +68,7 @@ public:
bool isEnumeration() const { return existPair("enumeration"); }
bool isExternal() const { return existPair("external"); }
bool isGlobal() const { return existPair("global"); }
+ bool isInterface() const { return existPair("interface"); }
// The data members of this type - only valid for messages and SLICC
// declared structures
@@ -92,7 +93,7 @@ public:
bool enumExist(string id) const { return m_enum_map.exist(id); }
// Write the C output files
- void writeCFiles(string path) const;
+ void writeCFiles(string path) ;
bool hasDefault() const { return existPair("default"); }
string getDefault() const { return lookupPair("default"); }
diff --git a/src/mem/slicc/symbols/Var.hh b/src/mem/slicc/symbols/Var.hh
index 58e9f28e4..4cb504296 100644
--- a/src/mem/slicc/symbols/Var.hh
+++ b/src/mem/slicc/symbols/Var.hh
@@ -61,7 +61,7 @@ public:
// Public Methods
string cIdent() const { return m_c_id; }
- void writeCFiles(string path) const {}
+ void writeCFiles(string path) {}
string getCode() const { return m_code; }
Type* getType() const { return m_type_ptr; }
StateMachine* getMachine() const { return m_machine_ptr; }