-rw-r--r--  configs/ruby/MESI_Three_Level.py | 28
-rw-r--r--  configs/ruby/MESI_Two_Level.py | 35
-rw-r--r--  configs/ruby/MI_example.py | 25
-rw-r--r--  configs/ruby/MOESI_CMP_directory.py | 29
-rw-r--r--  configs/ruby/MOESI_CMP_token.py | 35
-rw-r--r--  configs/ruby/MOESI_hammer.py | 31
-rw-r--r--  configs/ruby/Network_test.py | 14
-rw-r--r--  configs/ruby/Ruby.py | 56
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L0cache.sm | 11
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L1cache.sm | 42
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L1cache.sm | 39
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L2cache.sm | 27
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dir.sm | 18
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dma.sm | 15
-rw-r--r--  src/mem/protocol/MI_example-cache.sm | 29
-rw-r--r--  src/mem/protocol/MI_example-dir.sm | 26
-rw-r--r--  src/mem/protocol/MI_example-dma.sm | 19
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm | 35
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L2cache.sm | 21
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm | 17
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dma.sm | 26
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm | 40
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L2cache.sm | 49
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dir.sm | 34
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dma.sm | 13
-rw-r--r--  src/mem/protocol/MOESI_hammer-cache.sm | 41
-rw-r--r--  src/mem/protocol/MOESI_hammer-dir.sm | 47
-rw-r--r--  src/mem/protocol/MOESI_hammer-dma.sm | 23
-rw-r--r--  src/mem/protocol/Network_test-cache.sm | 18
-rw-r--r--  src/mem/protocol/Network_test-dir.sm | 12
-rw-r--r--  src/mem/ruby/network/Network.cc | 23
-rw-r--r--  src/mem/ruby/network/Network.hh | 13
-rw-r--r--  src/mem/ruby/network/Network.py | 4
-rw-r--r--  src/mem/ruby/network/garnet/BaseGarnetNetwork.cc | 16
-rw-r--r--  src/mem/ruby/network/garnet/BaseGarnetNetwork.hh | 11
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc | 36
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh | 9
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc | 44
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh | 8
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/Router.cc | 2
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.cc | 11
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh | 1
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc | 34
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.hh | 10
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.cc | 34
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.hh | 9
-rw-r--r--  src/mem/ruby/network/simple/Switch.cc | 26
-rw-r--r--  src/mem/ruby/network/simple/Switch.hh | 11
-rw-r--r--  src/mem/ruby/network/simple/Throttle.cc | 157
-rw-r--r--  src/mem/ruby/network/simple/Throttle.hh | 15
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.cc | 7
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.hh | 19
-rw-r--r--  src/mem/slicc/symbols/StateMachine.py | 208
-rw-r--r--  src/python/swig/pyobject.cc | 22
54 files changed, 927 insertions, 658 deletions
diff --git a/configs/ruby/MESI_Three_Level.py b/configs/ruby/MESI_Three_Level.py
index ee6ceccf9..1ddffc34a 100644
--- a/configs/ruby/MESI_Three_Level.py
+++ b/configs/ruby/MESI_Three_Level.py
@@ -129,7 +129,19 @@ def create_system(options, system, dma_ports, ruby_system):
cpu_sequencers.append(cpu_seq)
l0_cntrl_nodes.append(l0_cntrl)
l1_cntrl_nodes.append(l1_cntrl)
- l0_cntrl.peer = l1_cntrl
+
+ # Connect the L0 and L1 controllers
+ l0_cntrl.bufferToL1 = l1_cntrl.bufferFromL0
+ l0_cntrl.bufferFromL1 = l1_cntrl.bufferToL0
+
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestToL2 = ruby_system.network.slave
+ l1_cntrl.responseToL2 = ruby_system.network.slave
+ l1_cntrl.unblockToL2 = ruby_system.network.slave
+
+ l1_cntrl.requestFromL2 = ruby_system.network.master
+ l1_cntrl.responseFromL2 = ruby_system.network.master
+
for j in xrange(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
@@ -146,6 +158,15 @@ def create_system(options, system, dma_ports, ruby_system):
i * num_l2caches_per_cluster + j))
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.unblockToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@@ -183,6 +204,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
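
With this change every MessageBuffer declared in a protocol's .sm file becomes a
parameter of the controller SimObject, and the config script connects it
explicitly, either to a peer controller's buffer (as with the L0/L1 pair above)
or to the network's vector ports. A minimal sketch of the convention used
throughout the config hunks below, with hypothetical controller and buffer names:

    # Buffers a controller sends on (declared network="To" in the .sm file)
    # attach to the network's slave port; buffers it receives on
    # (network="From") attach to the network's master port.
    ctrl.requestOut = ruby_system.network.slave    # controller -> network
    ctrl.responseIn = ruby_system.network.master   # network -> controller
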
diff --git a/configs/ruby/MESI_Two_Level.py b/configs/ruby/MESI_Two_Level.py
index c70c599c7..8d75fe22e 100644
--- a/configs/ruby/MESI_Two_Level.py
+++ b/configs/ruby/MESI_Two_Level.py
@@ -108,12 +108,19 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromL1Cache = ruby_system.network.slave
+ l1_cntrl.responseFromL1Cache = ruby_system.network.slave
+ l1_cntrl.unblockFromL1Cache = ruby_system.network.slave
+
+ l1_cntrl.requestToL1Cache = ruby_system.network.master
+ l1_cntrl.responseToL1Cache = ruby_system.network.master
+
+
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
@@ -132,10 +139,21 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.unblockToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
+
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
@@ -169,10 +187,14 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
- #
# Create the Ruby objects associated with the dma controller
- #
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
@@ -185,6 +207,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
+ # Connect the dma controller to the network
+ dma_cntrl.responseFromDir = ruby_system.network.master
+ dma_cntrl.requestToDir = ruby_system.network.slave
+
+
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
diff --git a/configs/ruby/MI_example.py b/configs/ruby/MI_example.py
index 012479250..f671adbaa 100644
--- a/configs/ruby/MI_example.py
+++ b/configs/ruby/MI_example.py
@@ -94,12 +94,17 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromCache = ruby_system.network.slave
+ l1_cntrl.responseFromCache = ruby_system.network.slave
+ l1_cntrl.forwardToCache = ruby_system.network.master
+ l1_cntrl.responseToCache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@@ -139,6 +144,15 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+ dir_cntrl.forwardFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
@@ -155,8 +169,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
- all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+ # Connect the directory controllers and the network
+ dma_cntrl.requestToDir = ruby_system.network.master
+ dma_cntrl.responseFromDir = ruby_system.network.slave
- topology = create_topology(all_cntrls, options)
+ all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+ topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
diff --git a/configs/ruby/MOESI_CMP_directory.py b/configs/ruby/MOESI_CMP_directory.py
index aa474209f..d390efa0d 100644
--- a/configs/ruby/MOESI_CMP_directory.py
+++ b/configs/ruby/MOESI_CMP_directory.py
@@ -104,12 +104,17 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromL1Cache = ruby_system.network.slave
+ l1_cntrl.responseFromL1Cache = ruby_system.network.slave
+ l1_cntrl.requestToL1Cache = ruby_system.network.master
+ l1_cntrl.responseToL1Cache = ruby_system.network.master
+
+
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
@@ -128,10 +133,21 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
+
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
@@ -164,6 +180,13 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.forwardFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
@@ -180,11 +203,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
+
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
-
return (cpu_sequencers, dir_cntrl_nodes, topology)
diff --git a/configs/ruby/MOESI_CMP_token.py b/configs/ruby/MOESI_CMP_token.py
index 36a532574..ef793530b 100644
--- a/configs/ruby/MOESI_CMP_token.py
+++ b/configs/ruby/MOESI_CMP_token.py
@@ -124,12 +124,20 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromL1Cache = ruby_system.network.slave
+ l1_cntrl.responseFromL1Cache = ruby_system.network.slave
+ l1_cntrl.persistentFromL1Cache = ruby_system.network.slave
+
+ l1_cntrl.requestToL1Cache = ruby_system.network.master
+ l1_cntrl.responseToL1Cache = ruby_system.network.master
+ l1_cntrl.persistentToL1Cache = ruby_system.network.master
+
+
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
@@ -149,6 +157,17 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
+ # Connect the L2 controllers and the network
+ l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
+ l2_cntrl.responseFromL2Cache = ruby_system.network.slave
+
+ l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
+ l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
+ l2_cntrl.responseToL2Cache = ruby_system.network.master
+ l2_cntrl.persistentToL2Cache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@@ -186,6 +205,18 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.persistentToDir = ruby_system.network.master
+ dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+ dir_cntrl.requestFromDir = ruby_system.network.slave
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.persistentFromDir = ruby_system.network.slave
+ dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
diff --git a/configs/ruby/MOESI_hammer.py b/configs/ruby/MOESI_hammer.py
index de98fd0c2..c13a6cc3a 100644
--- a/configs/ruby/MOESI_hammer.py
+++ b/configs/ruby/MOESI_hammer.py
@@ -119,12 +119,22 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
+
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controller and the network
+ # Connect the buffers from the controller to network
+ l1_cntrl.requestFromCache = ruby_system.network.slave
+ l1_cntrl.responseFromCache = ruby_system.network.slave
+ l1_cntrl.unblockFromCache = ruby_system.network.slave
+
+ # Connect the buffers from the network to the controller
+ l1_cntrl.forwardToCache = ruby_system.network.master
+ l1_cntrl.responseToCache = ruby_system.network.master
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@@ -198,6 +208,17 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controller to the network
+ dir_cntrl.forwardFromDir = ruby_system.network.slave
+ dir_cntrl.responseFromDir = ruby_system.network.slave
+ dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
+
+ dir_cntrl.unblockToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.dmaRequestToDir = ruby_system.network.master
+
+
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
@@ -217,7 +238,11 @@ def create_system(options, system, dma_ports, ruby_system):
if options.recycle_latency:
dma_cntrl.recycle_latency = options.recycle_latency
+ # Connect the dma controller to the network
+ dma_cntrl.responseFromDir = ruby_system.network.slave
+ dma_cntrl.requestToDir = ruby_system.network.master
+
+
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
-
return (cpu_sequencers, dir_cntrl_nodes, topology)
diff --git a/configs/ruby/Network_test.py b/configs/ruby/Network_test.py
index 553927bb7..7e4379c0d 100644
--- a/configs/ruby/Network_test.py
+++ b/configs/ruby/Network_test.py
@@ -91,12 +91,16 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
- #
# Add controllers and sequencers to the appropriate lists
- #
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
+ # Connect the L1 controllers and the network
+ l1_cntrl.requestFromCache = ruby_system.network.slave
+ l1_cntrl.responseFromCache = ruby_system.network.slave
+ l1_cntrl.forwardFromCache = ruby_system.network.slave
+
+
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@@ -114,6 +118,12 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
+ # Connect the directory controllers and the network
+ dir_cntrl.requestToDir = ruby_system.network.master
+ dir_cntrl.forwardToDir = ruby_system.network.master
+ dir_cntrl.responseToDir = ruby_system.network.master
+
+
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
diff --git a/configs/ruby/Ruby.py b/configs/ruby/Ruby.py
index d9517456b..3c43fa6c6 100644
--- a/configs/ruby/Ruby.py
+++ b/configs/ruby/Ruby.py
@@ -106,31 +106,7 @@ def create_system(options, system, piobus = None, dma_ports = []):
system.ruby = RubySystem(no_mem_vec = options.use_map)
ruby = system.ruby
- protocol = buildEnv['PROTOCOL']
- exec "import %s" % protocol
- try:
- (cpu_sequencers, dir_cntrls, topology) = \
- eval("%s.create_system(options, system, dma_ports, ruby)"
- % protocol)
- except:
- print "Error: could not create sytem for ruby protocol %s" % protocol
- raise
-
- # Create a port proxy for connecting the system port. This is
- # independent of the protocol and kept in the protocol-agnostic
- # part (i.e. here).
- sys_port_proxy = RubyPortProxy(ruby_system = ruby)
- # Give the system port proxy a SimObject parent without creating a
- # full-fledged controller
- system.sys_port_proxy = sys_port_proxy
-
- # Connect the system port for loading of binaries etc
- system.system_port = system.sys_port_proxy.slave
-
-
- #
# Set the network classes based on the command line options
- #
if options.garnet_network == "fixed":
NetworkClass = GarnetNetwork_d
IntLinkClass = GarnetIntLink_d
@@ -152,10 +128,34 @@ def create_system(options, system, piobus = None, dma_ports = []):
RouterClass = Switch
InterfaceClass = None
+ # Instantiate the network object so that the controllers can connect to it.
+ network = NetworkClass(ruby_system = ruby, topology = options.topology,
+ routers = [], ext_links = [], int_links = [], netifs = [])
+ ruby.network = network
+
+ protocol = buildEnv['PROTOCOL']
+ exec "import %s" % protocol
+ try:
+ (cpu_sequencers, dir_cntrls, topology) = \
+ eval("%s.create_system(options, system, dma_ports, ruby)"
+ % protocol)
+ except:
+ print "Error: could not create sytem for ruby protocol %s" % protocol
+ raise
+
+ # Create a port proxy for connecting the system port. This is
+ # independent of the protocol and kept in the protocol-agnostic
+ # part (i.e. here).
+ sys_port_proxy = RubyPortProxy(ruby_system = ruby)
+
+ # Give the system port proxy a SimObject parent without creating a
+ # full-fledged controller
+ system.sys_port_proxy = sys_port_proxy
+
+ # Connect the system port for loading of binaries etc
+ system.system_port = system.sys_port_proxy.slave
# Create the network topology
- network = NetworkClass(ruby_system = ruby, topology = topology.description,
- routers = [], ext_links = [], int_links = [], netifs = [])
topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
RouterClass)
@@ -168,14 +168,12 @@ def create_system(options, system, piobus = None, dma_ports = []):
network.enable_fault_model = True
network.fault_model = FaultModel()
- #
# Loop through the directory controlers.
# Determine the total memory size of the ruby system and verify it is equal
# to physmem. However, if Ruby memory is using sparse memory in SE
# mode, then the system should not back-up the memory state with
# the Memory Vector and thus the memory size bytes should stay at 0.
# Also set the numa bits to the appropriate values.
- #
total_mem_size = MemorySize('0B')
ruby.block_size_bytes = options.cacheline_size
@@ -196,8 +194,6 @@ def create_system(options, system, piobus = None, dma_ports = []):
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(total_mem_size.value == phys_mem_size)
-
- ruby.network = network
ruby.mem_size = total_mem_size
# Connect the cpu sequencers and the piobus
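
Because the controllers now reference ruby_system.network while they are being
built, Ruby.py instantiates the network (passing the topology name from the
command line) before importing the protocol and calling create_system(); the
topology object returned by the protocol is then used for makeTopology(). A
rough sketch of the resulting flow, assuming the simple network classes and the
MI_example protocol purely for concreteness:

    # Build the network first so the protocol's controllers can connect to it.
    network = SimpleNetwork(ruby_system = ruby, topology = options.topology,
                            routers = [], ext_links = [], int_links = [],
                            netifs = [])
    ruby.network = network

    # Now create the protocol-specific controllers; they attach their
    # MessageBuffers to network.slave / network.master as they are built.
    import MI_example
    (cpu_sequencers, dir_cntrls, topology) = \
        MI_example.create_system(options, system, dma_ports, ruby)

    # Finally realize the topology over the already-existing network.
    topology.makeTopology(options, network, SimpleIntLink, SimpleExtLink, Switch)
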
diff --git a/src/mem/protocol/MESI_Three_Level-L0cache.sm b/src/mem/protocol/MESI_Three_Level-L0cache.sm
index f707ba963..49b6aa7a9 100644
--- a/src/mem/protocol/MESI_Three_Level-L0cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L0cache.sm
@@ -33,14 +33,13 @@ machine(L0Cache, "MESI Directory L0 Cache")
Cycles request_latency := 2;
Cycles response_latency := 2;
bool send_evictions;
-{
- // NODE L0 CACHE
- // From this node's L0 cache to the network
- MessageBuffer bufferToL1, network="To", physical_network="0", ordered="true";
- // To this node's L0 cache FROM the network
- MessageBuffer bufferFromL1, network="From", physical_network="0", ordered="true";
+ // From this node's L0 cache to the network
+ MessageBuffer * bufferToL1, network="To", ordered="true";
+ // To this node's L0 cache FROM the network
+ MessageBuffer * bufferFromL1, network="From", ordered="true";
+{
// Message queue between this controller and the processor
MessageBuffer mandatoryQueue, ordered="false";
diff --git a/src/mem/protocol/MESI_Three_Level-L1cache.sm b/src/mem/protocol/MESI_Three_Level-L1cache.sm
index 170599a51..59249d822 100644
--- a/src/mem/protocol/MESI_Three_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L1cache.sm
@@ -32,26 +32,30 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
Cycles l1_request_latency := 2;
Cycles l1_response_latency := 2;
Cycles to_l2_latency := 1;
-{
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestToL2, network="To", virtual_network="0", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer responseToL2, network="To", virtual_network="1", ordered="false", vnet_type="response";
- MessageBuffer unblockToL2, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestFromL2, network="From", virtual_network="0", ordered="false", vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer responseFromL2, network="From", virtual_network="1", ordered="false", vnet_type="response";
-
- // Message Buffers between the L1 and the L0 Cache
- // From the L1 cache to the L0 cache
- MessageBuffer bufferToL0, network="To", physical_network="0", ordered="true";
- // From the L0 cache to the L1 cache
- MessageBuffer bufferFromL0, network="From", physical_network="0", ordered="true";
+ // Message Buffers between the L1 and the L0 Cache
+ // From the L1 cache to the L0 cache
+ MessageBuffer * bufferToL0, network="To", ordered="true";
+
+ // From the L0 cache to the L1 cache
+ MessageBuffer * bufferFromL0, network="From", ordered="true";
+
+ // Message queue from this L1 cache TO the network / L2
+ MessageBuffer * requestToL2, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+
+ MessageBuffer * responseToL2, network="To", virtual_network="1",
+ ordered="false", vnet_type="response";
+ MessageBuffer * unblockToL2, network="To", virtual_network="2",
+ ordered="false", vnet_type="unblock";
+
+ // To this L1 cache FROM the network / L2
+ MessageBuffer * requestFromL2, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+ MessageBuffer * responseFromL2, network="From", virtual_network="1",
+ ordered="false", vnet_type="response";
+
+{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
diff --git a/src/mem/protocol/MESI_Two_Level-L1cache.sm b/src/mem/protocol/MESI_Two_Level-L1cache.sm
index 96c1699b7..6c98c23e9 100644
--- a/src/mem/protocol/MESI_Two_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L1cache.sm
@@ -37,25 +37,34 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
Cycles to_l2_latency := 1;
bool send_evictions;
bool enable_prefetch := "False";
+
+ // Message Queues
+ // From this node's L1 cache TO the network
+
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="unblock";
+
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="response";
{
- // NODE L1 CACHE
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";
- MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";
// Request Buffer for prefetches
MessageBuffer optionalQueue, ordered="false";
-
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
diff --git a/src/mem/protocol/MESI_Two_Level-L2cache.sm b/src/mem/protocol/MESI_Two_Level-L2cache.sm
index f191ddccb..9e0522ea2 100644
--- a/src/mem/protocol/MESI_Two_Level-L2cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L2cache.sm
@@ -26,34 +26,33 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
- *
- */
-
machine(L2Cache, "MESI Directory L2 Cache CMP")
: CacheMemory * L2cache;
Cycles l2_request_latency := 2;
Cycles l2_response_latency := 2;
Cycles to_l1_latency := 1;
-{
- // L2 BANK QUEUES
+
+ // Message Queues
// From local bank of L2 cache TO the network
- MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0",
+ MessageBuffer * DirRequestFromL2Cache, network="To", virtual_network="0",
ordered="false", vnet_type="request"; // this L2 bank -> Memory
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0",
+
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="2",
ordered="false", vnet_type="request"; // this L2 bank -> a local L1
- MessageBuffer responseFromL2Cache, network="To", virtual_network="1",
+
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="1",
ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || Memory
// FROM the network to this local bank of L2 cache
- MessageBuffer unblockToL2Cache, network="From", virtual_network="2",
+ MessageBuffer * unblockToL2Cache, network="From", virtual_network="2",
ordered="false", vnet_type="unblock"; // a local L1 || Memory -> this L2 bank
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0",
+
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
ordered="false", vnet_type="request"; // a local L1 -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="1",
- ordered="false", vnet_type="response"; // a local L1 || Memory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="response"; // a local L1 || Memory -> this L2 bank
+{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
// Base states
diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm b/src/mem/protocol/MESI_Two_Level-dir.sm
index 679f2dee7..dd0ecf49e 100644
--- a/src/mem/protocol/MESI_Two_Level-dir.sm
+++ b/src/mem/protocol/MESI_Two_Level-dir.sm
@@ -26,27 +26,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-// This file is copied from Yasuko Watanabe's prefetch / memory protocol
-// Copied here by aep 12/14/07
-
-
machine(Directory, "MESI Two Level directory protocol")
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles to_mem_ctrl_latency := 1;
Cycles directory_latency := 6;
-{
- MessageBuffer requestToDir, network="From", virtual_network="0",
+
+ MessageBuffer * requestToDir, network="From", virtual_network="0",
ordered="false", vnet_type="request";
- MessageBuffer responseToDir, network="From", virtual_network="1",
+ MessageBuffer * responseToDir, network="From", virtual_network="1",
ordered="false", vnet_type="response";
- MessageBuffer responseFromDir, network="To", virtual_network="1",
+ MessageBuffer * responseFromDir, network="To", virtual_network="1",
ordered="false", vnet_type="response";
-
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
diff --git a/src/mem/protocol/MESI_Two_Level-dma.sm b/src/mem/protocol/MESI_Two_Level-dma.sm
index 80c70c80a..e31832620 100644
--- a/src/mem/protocol/MESI_Two_Level-dma.sm
+++ b/src/mem/protocol/MESI_Two_Level-dma.sm
@@ -30,11 +30,12 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
Cycles request_latency := 6;
-{
-
- MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
@@ -74,7 +75,7 @@ machine(DMA, "DMA Controller")
error("DMA does not support get data block.");
}
- out_port(reqToDirectory_out, RequestMsg, reqToDirectory, desc="...");
+ out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
@@ -106,7 +107,7 @@ machine(DMA, "DMA Controller")
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+ enqueue(requestToDir_out, RequestMsg, request_latency) {
out_msg.Addr := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_READ;
out_msg.DataBlk := in_msg.DataBlk;
@@ -119,7 +120,7 @@ machine(DMA, "DMA Controller")
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+ enqueue(requestToDir_out, RequestMsg, request_latency) {
out_msg.Addr := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_WRITE;
out_msg.DataBlk := in_msg.DataBlk;
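
Renaming the DMA controller's request buffer from reqToDirectory to requestToDir
is not cosmetic: the buffers are now ordinary SimObject parameters, so the
attribute assigned in the config script must match the name declared in the .sm
file exactly. The MESI_Two_Level config earlier in this patch depends on that
correspondence:

    # From configs/ruby/MESI_Two_Level.py in this patch.
    dma_cntrl.responseFromDir = ruby_system.network.master   # dir -> DMA
    dma_cntrl.requestToDir = ruby_system.network.slave       # DMA -> dir
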
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 9b0c18bc8..ee774f4c2 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -28,20 +28,23 @@
*/
machine(L1Cache, "MI Example L1 Cache")
-: Sequencer * sequencer;
- CacheMemory * cacheMemory;
- Cycles cache_response_latency := 12;
- Cycles issue_latency := 2;
- bool send_evictions;
+ : Sequencer * sequencer;
+ CacheMemory * cacheMemory;
+ Cycles cache_response_latency := 12;
+ Cycles issue_latency := 2;
+ bool send_evictions;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="2",
+ ordered="true", vnet_type="request";
+ MessageBuffer * responseFromCache, network="To", virtual_network="4",
+ ordered="true", vnet_type="response";
+
+ MessageBuffer * forwardToCache, network="From", virtual_network="3",
+ ordered="true", vnet_type="forward";
+ MessageBuffer * responseToCache, network="From", virtual_network="4",
+ ordered="true", vnet_type="response";
{
-
- // NETWORK BUFFERS
- MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
- MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";
-
- MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
- MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
-
// STATES
state_declaration(State, desc="Cache states") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index f0d85cba8..cd12e3eb7 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -28,18 +28,22 @@
*/
machine(Directory, "Directory protocol")
-: DirectoryMemory * directory;
- MemoryControl * memBuffer;
- Cycles directory_latency := 12;
+ : DirectoryMemory * directory;
+ MemoryControl * memBuffer;
+ Cycles directory_latency := 12;
+
+ MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+ ordered="false", vnet_type="forward";
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+ ordered="true", vnet_type="response";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ ordered="true", vnet_type="request";
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ ordered="true", vnet_type="request";
{
-
- MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
- MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
-
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
- MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
-
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
diff --git a/src/mem/protocol/MI_example-dma.sm b/src/mem/protocol/MI_example-dma.sm
index 14b8c4e4a..e328d9e20 100644
--- a/src/mem/protocol/MI_example-dma.sm
+++ b/src/mem/protocol/MI_example-dma.sm
@@ -28,13 +28,14 @@
*/
machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-{
-
- MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
@@ -69,7 +70,7 @@ machine(DMA, "DMA Controller")
error("DMA Controller does not support getDataBlock function.\n");
}
- out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+ out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
@@ -101,7 +102,7 @@ machine(DMA, "DMA Controller")
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
@@ -116,7 +117,7 @@ machine(DMA, "DMA Controller")
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index fb74a67e4..3cd87616f 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -34,25 +34,24 @@ machine(L1Cache, "Directory protocol")
Cycles request_latency := 2;
Cycles use_timeout_latency := 50;
bool send_evictions;
-{
-
- // NODE L1 CACHE
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
-// MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";
-
-
+ // Message Queues
+ // From this node's L1 cache TO the network
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="response";
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
+ ordered="false", vnet_type="request";
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="response";
+{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index 7d81f4164..46fd12a3a 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -30,20 +30,25 @@ machine(L2Cache, "Token protocol")
: CacheMemory * L2cache;
Cycles response_latency := 2;
Cycles request_latency := 2;
-{
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false", vnet_type="request"; // this L2 bank -> a local L1
- MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request"; // this L2 bank -> mod-directory
- MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="0",
+ ordered="false", vnet_type="request"; // this L2 bank -> a local L1
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="request"; // this L2 bank -> mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
// FROM the network to this local bank of L2 cache
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false", vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
- MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request"; // mod-directory -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
-// MessageBuffer L1WritebackToL2Cache, network="From", virtual_network="3", ordered="false", vnet_type="writeback";
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
+ ordered="false", vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="request"; // mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
+{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index b403bc91c..272a8c9ab 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -30,16 +30,19 @@ machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles directory_latency := 6;
-{
-
- // ** IN QUEUES **
- MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
- MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
- MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
- MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
+ // Message Queues
+ MessageBuffer * requestToDir, network="From", virtual_network="1",
+ ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
+ MessageBuffer * forwardFromDir, network="To", virtual_network="1",
+ ordered="false", vnet_type="forward";
+ MessageBuffer * responseFromDir, network="To", virtual_network="2",
+ ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
index 1a8b3aea9..767a51a1f 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -27,22 +27,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 14;
- Cycles response_latency := 14;
-{
- MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
+machine(DMA, "DMA Controller")
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 14;
+ Cycles response_latency := 14;
+
+ MessageBuffer * responseFromDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * reqToDir, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+ MessageBuffer * respToDir, network="To", virtual_network="2",
+ ordered="false", vnet_type="dmaresponse";
- MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
- MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
-
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
}
-
+
enumeration(Event, desc="DMA events") {
ReadRequest, desc="A new read request";
WriteRequest, desc="A new write request";
@@ -293,7 +297,7 @@ machine(DMA, "DMA Controller")
}
transition(BUSY_WR, All_Acks, READY) {
- a_ackCallback;
+ a_ackCallback;
u_sendExclusiveUnblockToDir;
w_deallocateTBE;
p_popTriggerQueue;
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index b1197780f..860744384 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -48,24 +48,32 @@ machine(L1Cache, "Token protocol")
bool dynamic_timeout_enabled := "True";
bool no_mig_atomic := "True";
bool send_evictions;
-{
-
- // From this node's L1 cache TO the network
-
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
+ // Message Queues
+ // From this node's L1 cache TO the network
+
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+
+
+ // To this node's L1 cache FROM the network
+
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="request";
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
-
+{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index f8bd01695..a2488066a 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -32,29 +32,36 @@ machine(L2Cache, "Token protocol")
Cycles l2_request_latency := 5;
Cycles l2_response_latency := 5;
bool filtering_enabled := "True";
-{
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
-
- // this L2 bank -> a local L1 || mod-directory
- MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
- // this L2 bank -> mod-directory
- MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
- // this L2 bank -> a local L1
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+
+ // this L2 bank -> a local L1 || mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ // this L2 bank -> mod-directory
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
+ ordered="false", vnet_type="request";
+ // this L2 bank -> a local L1
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+
+
+ // FROM the network to this local bank of L2 cache
+
+ // a local L1 || mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+ // mod-directory -> this L2 bank
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+ // a local L1 -> this L2 bank
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
+ ordered="false", vnet_type="request";
- // FROM the network to this local bank of L2 cache
-
- // a local L1 || mod-directory -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
- // mod-directory -> this L2 bank
- MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
-
+{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
// Base states
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index 5cb29fcc2..be5df02e0 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -34,18 +34,34 @@ machine(Directory, "Token protocol")
bool distributed_persistent := "True";
Cycles fixed_timeout_latency := 100;
Cycles reissue_wakeup_latency := 10;
-{
- MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
- MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
- MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
+ // Message Queues from dir to other controllers / network
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
+ ordered="true", vnet_type="response";
+
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * persistentFromDir, network="To", virtual_network="3",
+ ordered="true", vnet_type="persistent";
- MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
- MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
+ MessageBuffer * requestFromDir, network="To", virtual_network="1",
+ ordered="false", vnet_type="request";
+
+ // Message Queues to dir from other controllers / network
+ MessageBuffer * responseToDir, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * persistentToDir, network="From", virtual_network="3",
+ ordered="true", vnet_type="persistent";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="request";
+
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ ordered="true", vnet_type="request";
+
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_O") {
// Base states
diff --git a/src/mem/protocol/MOESI_CMP_token-dma.sm b/src/mem/protocol/MOESI_CMP_token-dma.sm
index 441a001fc..72b0e52a5 100644
--- a/src/mem/protocol/MOESI_CMP_token-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dma.sm
@@ -28,13 +28,16 @@
machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-{
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
- MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ // Message Queues
+ MessageBuffer * responseFromDir, network="From", virtual_network="5",
+ ordered="true", vnet_type="response";
+ MessageBuffer * reqToDirectory, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index 7c150bda0..de502e118 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -34,26 +34,29 @@
*/
machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
-: Sequencer * sequencer;
- CacheMemory * L1Icache;
- CacheMemory * L1Dcache;
- CacheMemory * L2cache;
- Cycles cache_response_latency := 10;
- Cycles issue_latency := 2;
- Cycles l2_cache_hit_latency := 10;
- bool no_mig_atomic := "True";
- bool send_evictions;
+ : Sequencer * sequencer;
+ CacheMemory * L1Icache;
+ CacheMemory * L1Dcache;
+ CacheMemory * L2cache;
+ Cycles cache_response_latency := 10;
+ Cycles issue_latency := 2;
+ Cycles l2_cache_hit_latency := 10;
+ bool no_mig_atomic := "True";
+ bool send_evictions;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="2",
+ ordered="false", vnet_type="request";
+ MessageBuffer * responseFromCache, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
+ MessageBuffer * unblockFromCache, network="To", virtual_network="5",
+ ordered="false", vnet_type="unblock";
+
+ MessageBuffer * forwardToCache, network="From", virtual_network="3",
+ ordered="false", vnet_type="forward";
+ MessageBuffer * responseToCache, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
{
-
- // NETWORK BUFFERS
- MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
- MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";
-
- MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
- MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";
-
-
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index 4e2f846e2..db11b290f 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -34,28 +34,37 @@
*/
machine(Directory, "AMD Hammer-like protocol")
-: DirectoryMemory * directory;
- CacheMemory * probeFilter;
- MemoryControl * memBuffer;
- Cycles memory_controller_latency := 2;
- bool probe_filter_enabled := "False";
- bool full_bit_dir_enabled := "False";
-{
+ : DirectoryMemory * directory;
+ CacheMemory * probeFilter;
+ MemoryControl * memBuffer;
+ Cycles memory_controller_latency := 2;
+ bool probe_filter_enabled := "False";
+ bool full_bit_dir_enabled := "False";
- MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
- MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
- //
- // For a finite buffered network, note that the DMA response network only
- // works at this relatively lower numbered (lower priority) virtual network
- // because the trigger queue decouples cache responses from DMA responses.
- //
- MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
+ MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+ ordered="false", vnet_type="forward";
+
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ ordered="false", vnet_type="response";
- MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
- MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
- MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
+ // For a finite buffered network, note that the DMA response network only
+ // works at this relatively lower numbered (lower priority) virtual network
+ // because the trigger queue decouples cache responses from DMA responses.
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * unblockToDir, network="From", virtual_network="5",
+ ordered="false", vnet_type="unblock";
+
+ MessageBuffer * responseToDir, network="From", virtual_network="4",
+ ordered="false", vnet_type="response";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type="request", recycle_latency="1";
+
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ ordered="true", vnet_type="request";
+{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_E") {
// Base states
diff --git a/src/mem/protocol/MOESI_hammer-dma.sm b/src/mem/protocol/MOESI_hammer-dma.sm
index e4d26bb48..ab41adb4d 100644
--- a/src/mem/protocol/MOESI_hammer-dma.sm
+++ b/src/mem/protocol/MOESI_hammer-dma.sm
@@ -28,16 +28,15 @@
machine(DMA, "DMA Controller")
-: DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-{
-
- MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
- MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
- state_declaration(State,
- desc="DMA states",
- default="DMA_State_READY") {
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ ordered="true", vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ ordered="false", vnet_type="request";
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
@@ -71,7 +70,7 @@ machine(DMA, "DMA Controller")
error("DMA Controller does not support getDataBlock function.\n");
}
- out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+ out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
@@ -103,7 +102,7 @@ machine(DMA, "DMA Controller")
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
@@ -118,7 +117,7 @@ machine(DMA, "DMA Controller")
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
diff --git a/src/mem/protocol/Network_test-cache.sm b/src/mem/protocol/Network_test-cache.sm
index f69aecd93..e0307152d 100644
--- a/src/mem/protocol/Network_test-cache.sm
+++ b/src/mem/protocol/Network_test-cache.sm
@@ -32,15 +32,17 @@
machine(L1Cache, "Network_test L1 Cache")
-: Sequencer * sequencer;
- Cycles issue_latency := 2;
+ : Sequencer * sequencer;
+ Cycles issue_latency := 2;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="0",
+ ordered="false", vnet_type = "request";
+ MessageBuffer * forwardFromCache, network="To", virtual_network="1",
+ ordered="false", vnet_type = "forward";
+ MessageBuffer * responseFromCache, network="To", virtual_network="2",
+ ordered="false", vnet_type = "response";
{
-
- // NETWORK BUFFERS
- MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false", vnet_type = "request";
- MessageBuffer forwardFromCache, network="To", virtual_network="1", ordered="false", vnet_type = "forward";
- MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false", vnet_type = "response";
-
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";
diff --git a/src/mem/protocol/Network_test-dir.sm b/src/mem/protocol/Network_test-dir.sm
index 47e248dff..4d6472c54 100644
--- a/src/mem/protocol/Network_test-dir.sm
+++ b/src/mem/protocol/Network_test-dir.sm
@@ -32,13 +32,13 @@
machine(Directory, "Network_test Directory")
-:
+ : MessageBuffer * requestToDir, network="From", virtual_network="0",
+ ordered="false", vnet_type = "request";
+ MessageBuffer * forwardToDir, network="From", virtual_network="1",
+ ordered="false", vnet_type = "forward";
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ ordered="false", vnet_type = "response";
{
-
- MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type = "request";
- MessageBuffer forwardToDir, network="From", virtual_network="1", ordered="false", vnet_type = "forward";
- MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type = "response";
-
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
diff --git a/src/mem/ruby/network/Network.cc b/src/mem/ruby/network/Network.cc
index 60531a423..2d9376b08 100644
--- a/src/mem/ruby/network/Network.cc
+++ b/src/mem/ruby/network/Network.cc
@@ -57,19 +57,6 @@ Network::Network(const Params *p)
// Queues that are feeding the protocol
m_fromNetQueues.resize(m_nodes);
- for (int node = 0; node < m_nodes; node++) {
- // Setting number of virtual message buffers per Network Queue
- m_toNetQueues[node].resize(m_virtual_networks);
- m_fromNetQueues[node].resize(m_virtual_networks);
-
- // Instantiating the Message Buffers that
- // interact with the coherence protocol
- for (int j = 0; j < m_virtual_networks; j++) {
- m_toNetQueues[node][j] = new MessageBuffer();
- m_fromNetQueues[node][j] = new MessageBuffer();
- }
- }
-
m_in_use.resize(m_virtual_networks);
m_ordered.resize(m_virtual_networks);
@@ -95,10 +82,14 @@ Network::Network(const Params *p)
Network::~Network()
{
for (int node = 0; node < m_nodes; node++) {
+
// Delete the Message Buffers
- for (int j = 0; j < m_virtual_networks; j++) {
- delete m_toNetQueues[node][j];
- delete m_fromNetQueues[node][j];
+ for (auto& it : m_toNetQueues[node]) {
+ delete it.second;
+ }
+
+ for (auto& it : m_fromNetQueues[node]) {
+ delete it.second;
}
}
diff --git a/src/mem/ruby/network/Network.hh b/src/mem/ruby/network/Network.hh
index dcdd791e7..d595ca285 100644
--- a/src/mem/ruby/network/Network.hh
+++ b/src/mem/ruby/network/Network.hh
@@ -72,11 +72,10 @@ class Network : public ClockedObject
static uint32_t MessageSizeType_to_int(MessageSizeType size_type);
// returns the queue requested for the given component
- virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered,
- int netNumber, std::string vnet_type) = 0;
- virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered,
- int netNumber, std::string vnet_type) = 0;
-
+ virtual void setToNetQueue(NodeID id, bool ordered, int netNumber,
+ std::string vnet_type, MessageBuffer *b) = 0;
+ virtual void setFromNetQueue(NodeID id, bool ordered, int netNumber,
+ std::string vnet_type, MessageBuffer *b) = 0;
virtual void makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
LinkDirection direction,
@@ -113,8 +112,8 @@ class Network : public ClockedObject
static uint32_t m_data_msg_size;
// vector of queues from the components
- std::vector<std::vector<MessageBuffer*> > m_toNetQueues;
- std::vector<std::vector<MessageBuffer*> > m_fromNetQueues;
+ std::vector<std::map<int, MessageBuffer*> > m_toNetQueues;
+ std::vector<std::map<int, MessageBuffer*> > m_fromNetQueues;
std::vector<bool> m_in_use;
std::vector<bool> m_ordered;
diff --git a/src/mem/ruby/network/Network.py b/src/mem/ruby/network/Network.py
index 4f33dd196..8cc38f26f 100644
--- a/src/mem/ruby/network/Network.py
+++ b/src/mem/ruby/network/Network.py
@@ -28,7 +28,6 @@
# Brad Beckmann
from m5.params import *
-from m5.SimObject import SimObject
from ClockedObject import ClockedObject
from BasicLink import BasicLink
@@ -48,3 +47,6 @@ class RubyNetwork(ClockedObject):
netifs = VectorParam.ClockedObject("Network Interfaces")
ext_links = VectorParam.BasicExtLink("Links to external nodes")
int_links = VectorParam.BasicIntLink("Links between internal nodes")
+
+ slave = VectorSlavePort("CPU slave port")
+ master = VectorMasterPort("CPU master port")
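With the RubyNetwork now exposing slave/master vector ports, and each controller's MessageBuffer parameters generated as MasterPort ("To" network) or SlavePort ("From" network) objects, a protocol config script can attach a controller to the network by plain port assignment. The following is a minimal sketch of that wiring, using the buffer names from the MOESI_hammer-cache.sm hunk above; the actual configs/ruby hunks are not shown in this section, so the identifiers and the exact spelling of the connections are illustrative assumptions only.

    # Excerpt-style sketch, not a complete config: 'l1_cntrl' stands for an
    # already-created MOESI_hammer L1Cache_Controller and 'ruby_system' for the
    # enclosing RubySystem with its network.
    #
    # network="To" buffers are MasterPorts and bind to the network's slave port:
    l1_cntrl.requestFromCache = ruby_system.network.slave
    l1_cntrl.responseFromCache = ruby_system.network.slave
    l1_cntrl.unblockFromCache = ruby_system.network.slave
    # network="From" buffers are SlavePorts and bind to the master port:
    l1_cntrl.forwardToCache = ruby_system.network.master
    l1_cntrl.responseToCache = ruby_system.network.master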
diff --git a/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc b/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc
index 92e45c36d..2aeddad37 100644
--- a/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc
+++ b/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc
@@ -66,20 +66,20 @@ BaseGarnetNetwork::init()
Network::init();
}
-MessageBuffer*
-BaseGarnetNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
- string vnet_type)
+void
+BaseGarnetNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
+ string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num, vnet_type);
- return m_toNetQueues[id][network_num];
+ m_toNetQueues[id][network_num] = b;
}
-MessageBuffer*
-BaseGarnetNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
- string vnet_type)
+void
+BaseGarnetNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
+ string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num, vnet_type);
- return m_fromNetQueues[id][network_num];
+ m_fromNetQueues[id][network_num] = b;
}
void
diff --git a/src/mem/ruby/network/garnet/BaseGarnetNetwork.hh b/src/mem/ruby/network/garnet/BaseGarnetNetwork.hh
index c4bb9f5b1..cc1d4d929 100644
--- a/src/mem/ruby/network/garnet/BaseGarnetNetwork.hh
+++ b/src/mem/ruby/network/garnet/BaseGarnetNetwork.hh
@@ -68,12 +68,11 @@ class BaseGarnetNetwork : public Network
m_queueing_latency[vnet] += latency;
}
- // returns the queue requested for the given component
- MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type);
- MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type);
-
+ // set the queue
+ void setToNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
+ void setFromNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
index 2f1b5ee46..7384cc6a7 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
@@ -53,8 +53,6 @@ NetworkInterface_d::NetworkInterface_d(const Params *p)
m_vc_round_robin = 0;
m_ni_buffers.resize(m_num_vcs);
m_ni_enqueue_time.resize(m_num_vcs);
- inNode_ptr.resize(m_virtual_networks);
- outNode_ptr.resize(m_virtual_networks);
creditQueue = new flitBuffer_d();
// instantiating the NI flit buffers
@@ -108,18 +106,20 @@ NetworkInterface_d::addOutPort(NetworkLink_d *out_link,
}
void
-NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
- vector<MessageBuffer *>& out)
+NetworkInterface_d::addNode(map<int, MessageBuffer *>& in,
+ map<int, MessageBuffer *>& out)
{
- assert(in.size() == m_virtual_networks);
inNode_ptr = in;
outNode_ptr = out;
- for (int j = 0; j < m_virtual_networks; j++) {
+ for (auto& it : in) {
// the protocol injects messages into the NI
- inNode_ptr[j]->setConsumer(this);
- inNode_ptr[j]->setReceiver(this);
- outNode_ptr[j]->setSender(this);
+ it.second->setConsumer(this);
+ it.second->setReceiver(this);
+ }
+
+ for (auto& it : out) {
+ it.second->setSender(this);
}
}
@@ -223,11 +223,14 @@ NetworkInterface_d::wakeup()
// Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- while (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
- msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+ for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
+ int vnet = (*it).first;
+ MessageBuffer *b = (*it).second;
+
+ while (b->isReady()) { // Is there a message waiting
+ msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- inNode_ptr[vnet]->dequeue();
+ b->dequeue();
} else {
break;
}
@@ -351,12 +354,15 @@ NetworkInterface_d::get_vnet(int vc)
void
NetworkInterface_d::checkReschedule()
{
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
+ for (const auto& it : inNode_ptr) {
+ MessageBuffer *b = it.second;
+
+ while (b->isReady()) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
}
+
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh
index 05142cd28..2494d05d1 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh
@@ -60,8 +60,9 @@ class NetworkInterface_d : public ClockedObject, public Consumer
void addOutPort(NetworkLink_d *out_link, CreditLink_d *credit_link);
void wakeup();
- void addNode(std::vector<MessageBuffer *> &inNode,
- std::vector<MessageBuffer *> &outNode);
+ void addNode(std::map<int, MessageBuffer *> &inNode,
+ std::map<int, MessageBuffer *> &outNode);
+
void print(std::ostream& out) const;
int get_vnet(int vc);
void init_net_ptr(GarnetNetwork_d *net_ptr) { m_net_ptr = net_ptr; }
@@ -89,9 +90,9 @@ class NetworkInterface_d : public ClockedObject, public Consumer
std::vector<Cycles> m_ni_enqueue_time;
// The Message buffers that takes messages from the protocol
- std::vector<MessageBuffer *> inNode_ptr;
+ std::map<int, MessageBuffer *> inNode_ptr;
// The Message buffers that provides messages to the protocol
- std::vector<MessageBuffer *> outNode_ptr;
+ std::map<int, MessageBuffer *> outNode_ptr;
bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
int calculateVC(int vnet);
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
index 13bbe2b08..26d2423e8 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
@@ -49,13 +49,10 @@ NetworkInterface::NetworkInterface(const Params *p)
m_virtual_networks = p->virt_nets;
m_vc_per_vnet = p->vcs_per_vnet;
m_num_vcs = m_vc_per_vnet*m_virtual_networks;
-
m_vc_round_robin = 0;
- m_ni_buffers.resize(m_num_vcs);
- inNode_ptr.resize(m_virtual_networks);
- outNode_ptr.resize(m_virtual_networks);
// instantiating the NI flit buffers
+ m_ni_buffers.resize(m_num_vcs);
for (int i =0; i < m_num_vcs; i++)
m_ni_buffers[i] = new flitBuffer();
@@ -93,18 +90,20 @@ NetworkInterface::addOutPort(NetworkLink *out_link)
}
void
-NetworkInterface::addNode(vector<MessageBuffer*>& in,
- vector<MessageBuffer*>& out)
+NetworkInterface::addNode(map<int, MessageBuffer*>& in,
+ map<int, MessageBuffer*>& out)
{
- assert(in.size() == m_virtual_networks);
inNode_ptr = in;
outNode_ptr = out;
- // protocol injects messages into the NI
- for (int j = 0; j < m_virtual_networks; j++) {
- inNode_ptr[j]->setConsumer(this);
- inNode_ptr[j]->setReceiver(this);
- outNode_ptr[j]->setSender(this);
+ for (auto& it: in) {
+ // the protocol injects messages into the NI
+ it.second->setConsumer(this);
+ it.second->setReceiver(this);
+ }
+
+ for (auto& it : out) {
+ it.second->setSender(this);
}
}
@@ -243,12 +242,14 @@ NetworkInterface::wakeup()
//Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- while (inNode_ptr[vnet]->isReady()) // Is there a message waiting
- {
- msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
+ for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
+ int vnet = (*it).first;
+ MessageBuffer *b = (*it).second;
+
+ while (b->isReady()) { // Is there a message waiting
+ msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- inNode_ptr[vnet]->dequeue();
+ b->dequeue();
} else {
break;
}
@@ -324,14 +325,17 @@ NetworkInterface::scheduleOutputLink()
void
NetworkInterface::checkReschedule()
{
- for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
- if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
+ for (const auto& it : inNode_ptr) {
+ MessageBuffer *b = it.second;
+
+ while (b->isReady()) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
}
+
for (int vc = 0; vc < m_num_vcs; vc++) {
- if (m_ni_buffers[vc]->isReadyForNext(curCycle())) {
+ if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
return;
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh
index 0af538bf2..aa30bd758 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh
@@ -56,10 +56,10 @@ class NetworkInterface : public ClockedObject, public FlexibleConsumer
void addInPort(NetworkLink *in_link);
void addOutPort(NetworkLink *out_link);
+ void addNode(std::map<int, MessageBuffer *> &inNode,
+ std::map<int, MessageBuffer *> &outNode);
void wakeup();
- void addNode(std::vector<MessageBuffer *> &inNode,
- std::vector<MessageBuffer *> &outNode);
void grant_vc(int out_port, int vc, Cycles grant_time);
void release_vc(int out_port, int vc, Cycles release_time);
@@ -93,10 +93,10 @@ class NetworkInterface : public ClockedObject, public FlexibleConsumer
std::vector<flitBuffer *> m_ni_buffers;
// The Message buffers that takes messages from the protocol
- std::vector<MessageBuffer *> inNode_ptr;
+ std::map<int, MessageBuffer *> inNode_ptr;
// The Message buffers that provides messages to the protocol
- std::vector<MessageBuffer *> outNode_ptr;
+ std::map<int, MessageBuffer *> outNode_ptr;
bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
int calculateVC(int vnet);
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
index 851ababc4..0fc2c6be3 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
@@ -387,7 +387,7 @@ Router::checkReschedule()
{
for (int port = 0; port < m_out_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
- if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
+ if (m_router_buffers[port][vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
return;
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.cc b/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.cc
index 7a8ea8c23..ee31ac3d2 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.cc
@@ -62,17 +62,6 @@ flitBuffer::isReady(Cycles curTime)
}
bool
-flitBuffer::isReadyForNext(Cycles curTime)
-{
- if (m_buffer.size() != 0 ) {
- flit *t_flit = m_buffer.front();
- if (t_flit->get_time() <= (curTime + 1))
- return true;
- }
- return false;
-}
-
-bool
flitBuffer::isFull()
{
return (m_buffer.size() >= max_size);
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh b/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh
index 609c5a9b6..99fa2678b 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh
@@ -44,7 +44,6 @@ class flitBuffer
flitBuffer(int maximum_size);
bool isReady(Cycles curTime);
- bool isReadyForNext(Cycles curTime);
bool isFull();
bool isEmpty();
void setMaxSize(int maximum);
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 0c6111c48..4565711a2 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -61,36 +61,33 @@ PerfectSwitch::init(SimpleNetwork *network_ptr)
{
m_network_ptr = network_ptr;
- for(int i = 0;i < m_virtual_networks;++i)
- {
+ for(int i = 0;i < m_virtual_networks;++i) {
m_pending_message_count.push_back(0);
}
}
void
-PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
+PerfectSwitch::addInPort(const map<int, MessageBuffer*>& in)
{
- assert(in.size() == m_virtual_networks);
NodeID port = m_in.size();
m_in.push_back(in);
- for (int j = 0; j < m_virtual_networks; j++) {
- m_in[port][j]->setConsumer(this);
+ for (auto& it : in) {
+ it.second->setConsumer(this);
string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
- to_string(m_switch_id), to_string(port), to_string(j));
- m_in[port][j]->setDescription(desc);
- m_in[port][j]->setIncomingLink(port);
- m_in[port][j]->setVnet(j);
+ to_string(m_switch_id), to_string(port), to_string(it.first));
+
+ it.second->setDescription(desc);
+ it.second->setIncomingLink(port);
+ it.second->setVnet(it.first);
}
}
void
-PerfectSwitch::addOutPort(const vector<MessageBuffer*>& out,
+PerfectSwitch::addOutPort(const map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry)
{
- assert(out.size() == m_virtual_networks);
-
// Setup link order
LinkOrder l;
l.m_value = 0;
@@ -152,11 +149,16 @@ PerfectSwitch::wakeup()
vector<NetDest> output_link_destinations;
// Is there a message waiting?
- while (m_in[incoming][vnet]->isReady()) {
+ auto it = m_in[incoming].find(vnet);
+ if (it == m_in[incoming].end())
+ continue;
+ MessageBuffer *buffer = (*it).second;
+
+ while (buffer->isReady()) {
DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
// Peek at message
- msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
+ msg_ptr = buffer->peekMsgPtr();
net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
@@ -261,7 +263,7 @@ PerfectSwitch::wakeup()
}
// Dequeue msg
- m_in[incoming][vnet]->dequeue();
+ buffer->dequeue();
m_pending_message_count[vnet]--;
// Enqueue it - for all outgoing queues
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index c01c50a3b..25e3e2754 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -65,9 +65,10 @@ class PerfectSwitch : public Consumer
{ return csprintf("PerfectSwitch-%i", m_switch_id); }
void init(SimpleNetwork *);
- void addInPort(const std::vector<MessageBuffer*>& in);
- void addOutPort(const std::vector<MessageBuffer*>& out,
+ void addInPort(const std::map<int, MessageBuffer*>& in);
+ void addOutPort(const std::map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry);
+
int getInLinks() const { return m_in.size(); }
int getOutLinks() const { return m_out.size(); }
@@ -86,8 +87,9 @@ class PerfectSwitch : public Consumer
SwitchID m_switch_id;
// vector of queues from the components
- std::vector<std::vector<MessageBuffer*> > m_in;
- std::vector<std::vector<MessageBuffer*> > m_out;
+ std::vector<std::map<int, MessageBuffer*> > m_in;
+ std::vector<std::map<int, MessageBuffer*> > m_out;
+
std::vector<NetDest> m_routing_table;
std::vector<LinkOrder> m_link_order;
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.cc b/src/mem/ruby/network/simple/SimpleNetwork.cc
index 2d08f9fa4..f51a0c891 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.cc
+++ b/src/mem/ruby/network/simple/SimpleNetwork.cc
@@ -93,10 +93,9 @@ SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
SimpleExtLink *simple_link = safe_cast<SimpleExtLink*>(link);
- m_switches[src]->addOutPort(m_fromNetQueues[dest],
- routing_table_entry,
- simple_link->m_latency,
- simple_link->m_bw_multiplier);
+ m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
+ simple_link->m_latency,
+ simple_link->m_bw_multiplier);
m_endpoint_switches[dest] = m_switches[src];
}
@@ -118,25 +117,28 @@ SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link,
const NetDest& routing_table_entry)
{
// Create a set of new MessageBuffers
- std::vector<MessageBuffer*> queues;
+ std::map<int, MessageBuffer*> queues;
for (int i = 0; i < m_virtual_networks; i++) {
// allocate a buffer
MessageBuffer* buffer_ptr = new MessageBuffer;
buffer_ptr->setOrdering(true);
+
if (m_buffer_size > 0) {
buffer_ptr->resize(m_buffer_size);
}
- queues.push_back(buffer_ptr);
+
+ queues[i] = buffer_ptr;
// remember to deallocate it
m_buffers_to_free.push_back(buffer_ptr);
}
+
// Connect it to the two switches
SimpleIntLink *simple_link = safe_cast<SimpleIntLink*>(link);
m_switches[dest]->addInPort(queues);
m_switches[src]->addOutPort(queues, routing_table_entry,
- simple_link->m_latency,
- simple_link->m_bw_multiplier);
+ simple_link->m_latency,
+ simple_link->m_bw_multiplier);
}
void
@@ -151,20 +153,20 @@ SimpleNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
m_in_use[network_num] = true;
}
-MessageBuffer*
-SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type)
+void
+SimpleNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num);
- return m_toNetQueues[id][network_num];
+ m_toNetQueues[id][network_num] = b;
}
-MessageBuffer*
-SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
- std::string vnet_type)
+void
+SimpleNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num);
- return m_fromNetQueues[id][network_num];
+ m_fromNetQueues[id][network_num] = b;
}
void
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.hh b/src/mem/ruby/network/simple/SimpleNetwork.hh
index 90560c267..a2723c715 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.hh
+++ b/src/mem/ruby/network/simple/SimpleNetwork.hh
@@ -56,9 +56,11 @@ class SimpleNetwork : public Network
void collateStats();
void regStats();
- // returns the queue requested for the given component
- MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
- MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
+ // sets the queue requested
+ void setToNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
+ void setFromNetQueue(NodeID id, bool ordered, int network_num,
+ std::string vnet_type, MessageBuffer *b);
bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
@@ -89,6 +91,7 @@ class SimpleNetwork : public Network
// Private copy constructor and assignment operator
SimpleNetwork(const SimpleNetwork& obj);
SimpleNetwork& operator=(const SimpleNetwork& obj);
+
std::vector<Switch*> m_switches;
std::vector<MessageBuffer*> m_buffers_to_free;
std::vector<Switch*> m_endpoint_switches;
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index 6e116d82c..e028de02a 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -64,29 +64,33 @@ Switch::init()
}
void
-Switch::addInPort(const vector<MessageBuffer*>& in)
+Switch::addInPort(const map<int, MessageBuffer*>& in)
{
m_perfect_switch->addInPort(in);
- for (int i = 0; i < in.size(); i++) {
- in[i]->setReceiver(this);
+ for (auto& it : in) {
+ it.second->setReceiver(this);
}
}
void
-Switch::addOutPort(const vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
+Switch::addOutPort(const map<int, MessageBuffer*>& out,
+ const NetDest& routing_table_entry,
+ Cycles link_latency, int bw_multiplier)
{
// Create a throttle
Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
- link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
- this);
+ link_latency, bw_multiplier,
+ m_network_ptr->getEndpointBandwidth(),
+ this);
+
m_throttles.push_back(throttle_ptr);
// Create one buffer per vnet (these are intermediaryQueues)
- vector<MessageBuffer*> intermediateBuffers;
- for (int i = 0; i < out.size(); i++) {
- out[i]->setSender(this);
+ map<int, MessageBuffer*> intermediateBuffers;
+
+ for (auto& it : out) {
+ it.second->setSender(this);
MessageBuffer* buffer_ptr = new MessageBuffer;
// Make these queues ordered
@@ -95,7 +99,7 @@ Switch::addOutPort(const vector<MessageBuffer*>& out,
buffer_ptr->resize(m_network_ptr->getBufferSize());
}
- intermediateBuffers.push_back(buffer_ptr);
+ intermediateBuffers[it.first] = buffer_ptr;
m_buffers_to_free.push_back(buffer_ptr);
buffer_ptr->setSender(this);
diff --git a/src/mem/ruby/network/simple/Switch.hh b/src/mem/ruby/network/simple/Switch.hh
index 58193d42d..d4e5c5eba 100644
--- a/src/mem/ruby/network/simple/Switch.hh
+++ b/src/mem/ruby/network/simple/Switch.hh
@@ -60,12 +60,13 @@ class Switch : public BasicRouter
typedef SwitchParams Params;
Switch(const Params *p);
~Switch();
-
void init();
- void addInPort(const std::vector<MessageBuffer*>& in);
- void addOutPort(const std::vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, Cycles link_latency,
- int bw_multiplier);
+
+ void addInPort(const std::map<int, MessageBuffer*>& in);
+ void addOutPort(const std::map<int, MessageBuffer*>& out,
+ const NetDest& routing_table_entry,
+ Cycles link_latency, int bw_multiplier);
+
const Throttle* getThrottle(LinkID link_number) const;
void resetStats();
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index 40958a6da..91bad217b 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -69,42 +69,92 @@ Throttle::init(NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth)
{
m_node = node;
- m_vnets = 0;
-
assert(link_bandwidth_multiplier > 0);
m_link_bandwidth_multiplier = link_bandwidth_multiplier;
+
m_link_latency = link_latency;
m_endpoint_bandwidth = endpoint_bandwidth;
m_wakeups_wo_switch = 0;
-
m_link_utilization_proxy = 0;
}
void
-Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
- const std::vector<MessageBuffer*>& out_vec)
+Throttle::addLinks(const map<int, MessageBuffer*>& in_vec,
+ const map<int, MessageBuffer*>& out_vec)
{
assert(in_vec.size() == out_vec.size());
- for (int i=0; i<in_vec.size(); i++) {
- addVirtualNetwork(in_vec[i], out_vec[i]);
+
+ for (auto& it : in_vec) {
+ int vnet = it.first;
+
+ auto jt = out_vec.find(vnet);
+ assert(jt != out_vec.end());
+
+ MessageBuffer *in_ptr = it.second;
+ MessageBuffer *out_ptr = (*jt).second;
+
+ m_in[vnet] = in_ptr;
+ m_out[vnet] = out_ptr;
+ m_units_remaining[vnet] = 0;
+
+ // Set consumer and description
+ in_ptr->setConsumer(this);
+ string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
+ to_string(m_node) + "]";
+ in_ptr->setDescription(desc);
}
}
void
-Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
+Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
+ MessageBuffer *in, MessageBuffer *out)
{
- m_units_remaining.push_back(0);
- m_in.push_back(in_ptr);
- m_out.push_back(out_ptr);
+ assert(out != NULL);
+ assert(in != NULL);
+ assert(m_units_remaining[vnet] >= 0);
+
+ while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+ out->areNSlotsAvailable(1)) {
+
+ // See if we are done transferring the previous message on
+ // this virtual network
+ if (m_units_remaining[vnet] == 0 && in->isReady()) {
+ // Find the size of the message we are moving
+ MsgPtr msg_ptr = in->peekMsgPtr();
+ NetworkMessage* net_msg_ptr =
+ safe_cast<NetworkMessage*>(msg_ptr.get());
+ m_units_remaining[vnet] +=
+ network_message_to_size(net_msg_ptr);
+
+ DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
+ "enqueueing net msg %d time: %lld.\n",
+ m_node, getLinkBandwidth(), m_units_remaining[vnet],
+ g_system_ptr->curCycle());
+
+ // Move the message
+ in->dequeue();
+ out->enqueue(msg_ptr, m_link_latency);
+
+ // Count the message
+ m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
+ DPRINTF(RubyNetwork, "%s\n", *out);
+ }
+
+ // Calculate the amount of bandwidth we spent on this message
+ int diff = m_units_remaining[vnet] - bw_remaining;
+ m_units_remaining[vnet] = max(0, diff);
+ bw_remaining = max(0, -diff);
+ }
- // Set consumer and description
- m_in[m_vnets]->setConsumer(this);
+ if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
+ !out->areNSlotsAvailable(1)) {
+ DPRINTF(RubyNetwork, "vnet: %d", vnet);
- string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
- to_string(m_node) + "]";
- m_in[m_vnets]->setDescription(desc);
- m_vnets++;
+ // schedule me to wakeup again because I'm waiting for my
+ // output queue to become available
+ schedule_wakeup = true;
+ }
}
void
@@ -114,71 +164,30 @@ Throttle::wakeup()
assert(getLinkBandwidth() > 0);
int bw_remaining = getLinkBandwidth();
- // Give the highest numbered link priority most of the time
m_wakeups_wo_switch++;
- int highest_prio_vnet = m_vnets-1;
- int lowest_prio_vnet = 0;
- int counter = 1;
bool schedule_wakeup = false;
+ // variable for deciding the direction in which to iterate
+ bool iteration_direction = false;
+
+
// invert priorities to avoid starvation seen in the component network
if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
m_wakeups_wo_switch = 0;
- highest_prio_vnet = 0;
- lowest_prio_vnet = m_vnets-1;
- counter = -1;
+ iteration_direction = true;
}
- for (int vnet = highest_prio_vnet;
- (vnet * counter) >= (counter * lowest_prio_vnet);
- vnet -= counter) {
-
- assert(m_out[vnet] != NULL);
- assert(m_in[vnet] != NULL);
- assert(m_units_remaining[vnet] >= 0);
-
- while (bw_remaining > 0 &&
- (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
- m_out[vnet]->areNSlotsAvailable(1)) {
-
- // See if we are done transferring the previous message on
- // this virtual network
- if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
- // Find the size of the message we are moving
- MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
- NetworkMessage* net_msg_ptr =
- safe_cast<NetworkMessage*>(msg_ptr.get());
- m_units_remaining[vnet] +=
- network_message_to_size(net_msg_ptr);
-
- DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
- "enqueueing net msg %d time: %lld.\n",
- m_node, getLinkBandwidth(), m_units_remaining[vnet],
- g_system_ptr->curCycle());
-
- // Move the message
- m_in[vnet]->dequeue();
- m_out[vnet]->enqueue(msg_ptr, m_link_latency);
-
- // Count the message
- m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
-
- DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
- }
-
- // Calculate the amount of bandwidth we spent on this message
- int diff = m_units_remaining[vnet] - bw_remaining;
- m_units_remaining[vnet] = max(0, diff);
- bw_remaining = max(0, -diff);
+ if (iteration_direction) {
+ for (auto& it : m_in) {
+ int vnet = it.first;
+ operateVnet(vnet, bw_remaining, schedule_wakeup,
+ it.second, m_out[vnet]);
}
-
- if (bw_remaining > 0 &&
- (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
- !m_out[vnet]->areNSlotsAvailable(1)) {
- DPRINTF(RubyNetwork, "vnet: %d", vnet);
- // schedule me to wakeup again because I'm waiting for my
- // output queue to become available
- schedule_wakeup = true;
+ } else {
+ for (auto it = m_in.rbegin(); it != m_in.rend(); ++it) {
+ int vnet = (*it).first;
+ operateVnet(vnet, bw_remaining, schedule_wakeup,
+ (*it).second, m_out[vnet]);
}
}
@@ -215,7 +224,7 @@ Throttle::regStats(string parent)
for (MessageSizeType type = MessageSizeType_FIRST;
type < MessageSizeType_NUM; ++type) {
m_msg_counts[(unsigned int)type]
- .init(m_vnets)
+ .init(Network::getNumberOfVirtualNetworks())
.name(parent + csprintf(".throttle%i", m_node) + ".msg_count." +
MessageSizeType_to_string(type))
.flags(Stats::nozero)
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
index cdc627bb7..d978f14fd 100644
--- a/src/mem/ruby/network/simple/Throttle.hh
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -62,8 +62,8 @@ class Throttle : public Consumer
std::string name()
{ return csprintf("Throttle-%i", m_sID); }
- void addLinks(const std::vector<MessageBuffer*>& in_vec,
- const std::vector<MessageBuffer*>& out_vec);
+ void addLinks(const std::map<int, MessageBuffer*>& in_vec,
+ const std::map<int, MessageBuffer*>& out_vec);
void wakeup();
// The average utilization (a fraction) since last clearStats()
@@ -85,16 +85,17 @@ class Throttle : public Consumer
private:
void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth);
- void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
+    void operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
+ MessageBuffer *in, MessageBuffer *out);
// Private copy constructor and assignment operator
Throttle(const Throttle& obj);
Throttle& operator=(const Throttle& obj);
- std::vector<MessageBuffer*> m_in;
- std::vector<MessageBuffer*> m_out;
- unsigned int m_vnets;
- std::vector<int> m_units_remaining;
+ std::map<int, MessageBuffer*> m_in;
+ std::map<int, MessageBuffer*> m_out;
+ std::map<int, int> m_units_remaining;
+
int m_sID;
NodeID m_node;
int m_link_bandwidth_multiplier;
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index 0f5a70a6e..366ea04ce 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -89,13 +89,6 @@ AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
}
void
-AbstractController::connectWithPeer(AbstractController *c)
-{
- getQueuesFromPeer(c);
- c->getQueuesFromPeer(this);
-}
-
-void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{
if (m_waiting_buffers.count(addr) == 0) {
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index 36b4665c3..42d158653 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -96,6 +96,9 @@ class AbstractController : public ClockedObject, public Consumer
virtual void collateStats()
{fatal("collateStats() should be overridden!");}
+    //! Set the message buffer with the given name.
+ virtual void setNetQueue(const std::string& name, MessageBuffer *b) = 0;
+
public:
MachineID getMachineID() const { return m_machineID; }
@@ -103,25 +106,12 @@ class AbstractController : public ClockedObject, public Consumer
Stats::Histogram& getDelayVCHist(uint32_t index)
{ return *(m_delayVCHistogram[index]); }
- MessageBuffer *getPeerQueue(uint32_t pid)
- {
- std::map<uint32_t, MessageBuffer *>::iterator it =
- peerQueueMap.find(pid);
- assert(it != peerQueueMap.end());
- return (*it).second;
- }
-
protected:
//! Profiles original cache requests including PUTs
void profileRequest(const std::string &request);
//! Profiles the delay associated with messages.
void profileMsgDelay(uint32_t virtualNetwork, Cycles delay);
- //! Function for connecting peer controllers
- void connectWithPeer(AbstractController *);
- virtual void getQueuesFromPeer(AbstractController *)
- { fatal("getQueuesFromPeer() should be called only if implemented!"); }
-
void stallBuffer(MessageBuffer* buf, Address addr);
void wakeUpBuffers(Address addr);
void wakeUpAllBuffers(Address addr);
@@ -147,9 +137,6 @@ class AbstractController : public ClockedObject, public Consumer
unsigned int m_buffer_size;
Cycles m_recycle_latency;
- //! Map from physical network number to the Message Buffer.
- std::map<uint32_t, MessageBuffer*> peerQueueMap;
-
//! Counter for the number of cycles when the transitions carried out
//! were equal to the maximum allowed
Stats::Scalar m_fully_busy_cycles;
diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py
index 71fcc053f..736013612 100644
--- a/src/mem/slicc/symbols/StateMachine.py
+++ b/src/mem/slicc/symbols/StateMachine.py
@@ -51,7 +51,12 @@ class StateMachine(Symbol):
def __init__(self, symtab, ident, location, pairs, config_parameters):
super(StateMachine, self).__init__(symtab, ident, location, pairs)
self.table = None
+
+ # Data members in the State Machine that have been declared before
+ # the opening brace '{' of the machine. Note that these along with
+ # the members in self.objects form the entire set of data members.
self.config_parameters = config_parameters
+
self.prefetchers = []
for param in config_parameters:
@@ -74,6 +79,10 @@ class StateMachine(Symbol):
self.transitions = []
self.in_ports = []
self.functions = []
+
+        # Data members in the State Machine that have been declared inside
+        # the braces '{ }' of the machine. Note that these along with the
+        # config params form the entire set of data members of the machine.
self.objects = []
self.TBEType = None
self.EntryType = None
@@ -200,7 +209,13 @@ class $py_ident(RubyController):
if param.rvalue is not None:
dflt_str = str(param.rvalue.inline()) + ', '
- if python_class_map.has_key(param.type_ast.type.c_ident):
+ if param.type_ast.type.c_ident == "MessageBuffer":
+ if param["network"] == "To":
+ code('${{param.ident}} = MasterPort(${dflt_str}"")')
+ else:
+ code('${{param.ident}} = SlavePort(${dflt_str}"")')
+
+ elif python_class_map.has_key(param.type_ast.type.c_ident):
python_type = python_class_map[param.type_ast.type.c_ident]
code('${{param.ident}} = Param.${{python_type}}(${dflt_str}"")')
@@ -241,13 +256,10 @@ class $py_ident(RubyController):
''')
seen_types = set()
- has_peer = False
for var in self.objects:
if var.type.ident not in seen_types and not var.type.isPrimitive:
code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
- if "network" in var and "physical_network" in var:
- has_peer = True
- seen_types.add(var.type.ident)
+ seen_types.add(var.type.ident)
# for adding information to the protocol debug trace
code('''
@@ -260,7 +272,9 @@ class $c_ident : public AbstractController
$c_ident(const Params *p);
static int getNumControllers();
void init();
+
MessageBuffer* getMandatoryQueue() const;
+ void setNetQueue(const std::string& name, MessageBuffer *b);
void print(std::ostream& out) const;
void wakeup();
@@ -340,8 +354,6 @@ static int m_num_controllers;
if proto:
code('$proto')
- if has_peer:
- code('void getQueuesFromPeer(AbstractController *);')
if self.EntryType != None:
code('''
@@ -404,7 +416,6 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr);
code = self.symtab.codeFormatter()
ident = self.ident
c_ident = "%s_Controller" % self.ident
- has_peer = False
code('''
/** \\file $c_ident.cc
@@ -486,10 +497,17 @@ $c_ident::$c_ident(const Params *p)
# include a sequencer, connect it to the controller.
#
for param in self.config_parameters:
+
+            # Do not initialize message buffers since they are initialized
+            # when the port-based connections are made.
+ if param.type_ast.type.c_ident == "MessageBuffer":
+ continue
+
if param.pointer:
code('m_${{param.ident}}_ptr = p->${{param.ident}};')
else:
code('m_${{param.ident}} = p->${{param.ident}};')
+
if re.compile("sequencer").search(param.ident):
code('m_${{param.ident}}_ptr->setController(this);')
@@ -499,19 +517,8 @@ $c_ident::$c_ident(const Params *p)
m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
m_${{var.ident}}_ptr->setReceiver(this);
''')
- else:
- if "network" in var and "physical_network" in var and \
- var["network"] == "To":
- has_peer = True
- code('''
-m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
-peerQueueMap[${{var["physical_network"]}}] = m_${{var.ident}}_ptr;
-m_${{var.ident}}_ptr->setSender(this);
-''')
code('''
-if (p->peer != NULL)
- connectWithPeer(p->peer);
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
@@ -528,16 +535,92 @@ for (int event = 0; event < ${ident}_Event_NUM; event++) {
}
void
-$c_ident::init()
+$c_ident::setNetQueue(const std::string& name, MessageBuffer *b)
{
- MachineType machine_type = string_to_MachineType("${{var.machine.ident}}");
+ MachineType machine_type = string_to_MachineType("${{self.ident}}");
int base M5_VAR_USED = MachineType_base_number(machine_type);
+''')
+ code.indent()
+
+        # Set of (vnet, direction) pairs already seen for this machine, used
+        # to check that no two message buffers are mapped to the same virtual
+        # network in the same direction.
+ vnet_dir_set = set()
+
+ for var in self.config_parameters:
+ if "network" in var:
+ vtype = var.type_ast.type
+ vid = "m_%s_ptr" % var.ident
+
+ code('''
+if ("${{var.ident}}" == name) {
+ $vid = b;
+ assert($vid != NULL);
+''')
+ code.indent()
+ # Network port object
+ network = var["network"]
+ ordered = var["ordered"]
+
+ if "virtual_network" in var:
+ vnet = var["virtual_network"]
+ vnet_type = var["vnet_type"]
+
+ assert (vnet, network) not in vnet_dir_set
+ vnet_dir_set.add((vnet,network))
+
+ code('''
+m_net_ptr->set${network}NetQueue(m_version + base, $ordered, $vnet,
+ "$vnet_type", b);
+''')
+ # Set the end
+ if network == "To":
+ code('$vid->setSender(this);')
+ else:
+ code('$vid->setReceiver(this);')
+
+ # Set ordering
+ code('$vid->setOrdering(${{var["ordered"]}});')
+
+ # Set randomization
+ if "random" in var:
+ # A buffer
+ code('$vid->setRandomization(${{var["random"]}});')
+
+ # Set Priority
+ if "rank" in var:
+ code('$vid->setPriority(${{var["rank"]}})')
+
+ # Set buffer size
+ code('$vid->resize(m_buffer_size);')
+
+ if "recycle_latency" in var:
+ code('$vid->setRecycleLatency( ' \
+ 'Cycles(${{var["recycle_latency"]}}));')
+ else:
+ code('$vid->setRecycleLatency(m_recycle_latency);')
+
+                # set description (may be overridden later by port def)
+ code('''
+$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
+''')
+ code.dedent()
+ code('}\n')
+
+ code.dedent()
+ code('''
+}
+
+void
+$c_ident::init()
+{
// initialize objects
''')
code.indent()
+
for var in self.objects:
vtype = var.type
vid = "m_%s_ptr" % var.ident
@@ -589,55 +672,6 @@ $c_ident::init()
code('$vid->setSender(this);')
code('$vid->setReceiver(this);')
- else:
- # Network port object
- network = var["network"]
- ordered = var["ordered"]
-
- if "virtual_network" in var:
- vnet = var["virtual_network"]
- vnet_type = var["vnet_type"]
-
- assert var.machine is not None
- code('''
-$vid = m_net_ptr->get${network}NetQueue(m_version + base, $ordered, $vnet, "$vnet_type");
-assert($vid != NULL);
-''')
-
- # Set the end
- if network == "To":
- code('$vid->setSender(this);')
- else:
- code('$vid->setReceiver(this);')
-
- # Set ordering
- if "ordered" in var:
- # A buffer
- code('$vid->setOrdering(${{var["ordered"]}});')
-
- # Set randomization
- if "random" in var:
- # A buffer
- code('$vid->setRandomization(${{var["random"]}});')
-
- # Set Priority
- if "rank" in var:
- code('$vid->setPriority(${{var["rank"]}})')
-
- # Set buffer size
- if vtype.isBuffer:
- code('''
-if (m_buffer_size > 0) {
- $vid->resize(m_buffer_size);
-}
-''')
-
- # set description (may be overriden later by port def)
- code('''
-$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
-
-''')
-
if vtype.isBuffer:
if "recycle_latency" in var:
code('$vid->setRecycleLatency( ' \
@@ -965,6 +999,13 @@ $c_ident::functionalReadBuffers(PacketPtr& pkt)
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('if ($vid->functionalRead(pkt)) { return true; }')
+
+ for var in self.config_parameters:
+ vtype = var.type_ast.type
+ if vtype.isBuffer:
+ vid = "m_%s_ptr" % var.ident
+ code('if ($vid->functionalRead(pkt)) { return true; }')
+
code('''
return false;
}
@@ -982,31 +1023,18 @@ $c_ident::functionalWriteBuffers(PacketPtr& pkt)
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('num_functional_writes += $vid->functionalWrite(pkt);')
+
+ for var in self.config_parameters:
+ vtype = var.type_ast.type
+ if vtype.isBuffer:
+ vid = "m_%s_ptr" % var.ident
+ code('num_functional_writes += $vid->functionalWrite(pkt);')
+
code('''
return num_functional_writes;
}
''')
- # Check if this controller has a peer, if yes then write the
- # function for connecting to the peer.
- if has_peer:
- code('''
-
-void
-$c_ident::getQueuesFromPeer(AbstractController *peer)
-{
-''')
- for var in self.objects:
- if "network" in var and "physical_network" in var and \
- var["network"] == "From":
- code('''
-m_${{var.ident}}_ptr = peer->getPeerQueue(${{var["physical_network"]}});
-assert(m_${{var.ident}}_ptr != NULL);
-m_${{var.ident}}_ptr->setReceiver(this);
-
-''')
- code('}')
-
code.write(path, "%s.cc" % c_ident)
def printCWakeup(self, path, includes):
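The parameter hunk above maps every MessageBuffer config parameter onto a port of the generated RubyController subclass: MasterPort for network="To" buffers, SlavePort otherwise, with any default value threaded through dflt_str, while all other parameters keep their Param types. As a rough idea of the output, here is a hand-written approximation of the Python class SLICC would emit for the Network_test L1 cache shown earlier; the real file is produced at build time and is not part of this patch, so its exact contents are an assumption.

    from m5.params import *
    from Controller import RubyController

    class L1Cache_Controller(RubyController):
        type = 'L1Cache_Controller'
        # plain config parameters keep their Param types
        sequencer = Param.RubySequencer("")
        issue_latency = Param.Cycles(2, "")
        # MessageBuffer parameters now surface as ports instead of Params
        requestFromCache = MasterPort("")   # network="To"  -> MasterPort
        forwardFromCache = MasterPort("")   # network="To"  -> MasterPort
        responseFromCache = MasterPort("")  # network="To"  -> MasterPort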
diff --git a/src/python/swig/pyobject.cc b/src/python/swig/pyobject.cc
index fe849ec88..51bd1f62f 100644
--- a/src/python/swig/pyobject.cc
+++ b/src/python/swig/pyobject.cc
@@ -39,6 +39,7 @@
#include "dev/etherdevice.hh"
#include "dev/etherobject.hh"
#endif
+#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/mem_object.hh"
#include "python/swig/pyobject.hh"
#include "sim/full_system.hh"
@@ -98,6 +99,27 @@ connectPorts(SimObject *o1, const std::string &name1, int i1,
}
}
#endif
+
+    // These could be objects from the ruby memory system. If so, at least
+    // one of them should be an abstract controller. Do a type check.
+ AbstractController *ac1, *ac2;
+ ac1 = dynamic_cast<AbstractController*>(o1);
+ ac2 = dynamic_cast<AbstractController*>(o2);
+
+ if (ac1 || ac2) {
+ MessageBuffer *b = new MessageBuffer();
+
+ // set the message buffer associated with the provided names
+ if (ac1) {
+ ac1->setNetQueue(name1, b);
+ }
+ if (ac2) {
+ ac2->setNetQueue(name2, b);
+ }
+
+ return 1;
+ }
+
MemObject *mo1, *mo2;
mo1 = dynamic_cast<MemObject*>(o1);
mo2 = dynamic_cast<MemObject*>(o2);
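Together with the setNetQueue() hook added to AbstractController, this closes the loop: binding a controller buffer port in a config script is what ultimately hands the controller its MessageBuffer. Below is a sketch of that flow from the script side, with an illustrative binding built from the MOESI_hammer directory names above; connectPorts() itself is reached through m5's port-binding code during instantiation rather than called directly, and the surrounding objects are assumed to exist in a full config.

    import m5

    # 'dir_cntrl' and 'ruby_system' stand for objects created elsewhere in a
    # complete config; only the binding below is the point of the sketch.
    dir_cntrl.requestToDir = ruby_system.network.master  # "From" buffer -> SlavePort

    m5.instantiate()
    # During instantiation this binding reaches connectPorts(), which notices
    # that dir_cntrl is an AbstractController, allocates one MessageBuffer
    # shared by both endpoints, and calls
    #     dir_cntrl.setNetQueue("requestToDir", buffer)
    # The SLICC-generated setNetQueue() matches the name against the machine's
    # MessageBuffer parameters and registers the buffer with the network via
    # setFromNetQueue(), replacing the old getFromNetQueue()/peer mechanism.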