author    Tony Gutierrez <anthony.gutierrez@amd.com>   2016-01-19 14:28:22 -0500
committer Tony Gutierrez <anthony.gutierrez@amd.com>   2016-01-19 14:28:22 -0500
commit    1a7d3f9fcb76a68540dd948f91413533a383bfde (patch)
tree      867510a147cd095f19499d26b7c02d27de4cae9d /configs
parent    28e353e0403ea379d244a418e8dc8ee0b48187cf (diff)
download  gem5-1a7d3f9fcb76a68540dd948f91413533a383bfde.tar.xz
gpu-compute: AMD's baseline GPU model
Diffstat (limited to 'configs')
-rw-r--r--  configs/common/GPUTLBConfig.py            203
-rw-r--r--  configs/common/GPUTLBOptions.py           109
-rw-r--r--  configs/example/apu_se.py                 499
-rw-r--r--  configs/example/ruby_gpu_random_test.py   187
-rw-r--r--  configs/ruby/AMD_Base_Constructor.py      134
-rw-r--r--  configs/ruby/GPU_RfO.py                   751
-rw-r--r--  configs/ruby/GPU_VIPER.py                 674
-rw-r--r--  configs/ruby/GPU_VIPER_Baseline.py        588
-rw-r--r--  configs/ruby/GPU_VIPER_Region.py          758
-rw-r--r--  configs/ruby/MOESI_AMD_Base.py            326
10 files changed, 4229 insertions, 0 deletions
diff --git a/configs/common/GPUTLBConfig.py b/configs/common/GPUTLBConfig.py
new file mode 100644
index 000000000..b7ea6dcf1
--- /dev/null
+++ b/configs/common/GPUTLBConfig.py
@@ -0,0 +1,203 @@
+#
+# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Lisa Hsu
+#
+
+# Configure the TLB hierarchy
+# Places that would probably need to be modified if you
+# want a different hierarchy are marked with a < Modify here ... >
+# comment
+import sys
+
+import m5
+from m5.objects import *
+
+def TLB_constructor(level):
+
+ constructor_call = "X86GPUTLB(size = options.L%(level)dTLBentries, \
+ assoc = options.L%(level)dTLBassoc, \
+ hitLatency = options.L%(level)dAccessLatency,\
+ missLatency2 = options.L%(level)dMissLatency,\
+ maxOutstandingReqs = options.L%(level)dMaxOutstandingReqs,\
+ accessDistance = options.L%(level)dAccessDistanceStat,\
+ clk_domain = SrcClockDomain(\
+ clock = options.GPUClock,\
+ voltage_domain = VoltageDomain(\
+ voltage = options.gpu_voltage)))" % locals()
+ return constructor_call
+
+def Coalescer_constructor(level):
+
+ constructor_call = "TLBCoalescer(probesPerCycle = \
+ options.L%(level)dProbesPerCycle, \
+ coalescingWindow = options.L%(level)dCoalescingWindow,\
+ disableCoalescing = options.L%(level)dDisableCoalescing,\
+ clk_domain = SrcClockDomain(\
+ clock = options.GPUClock,\
+ voltage_domain = VoltageDomain(\
+ voltage = options.gpu_voltage)))" % locals()
+ return constructor_call
+
+def create_TLB_Coalescer(options, my_level, my_index, TLB_name, Coalescer_name):
+ # arguments: options, TLB level, number of private structures for this Level,
+ # TLB name and Coalescer name
+ for i in xrange(my_index):
+ TLB_name.append(eval(TLB_constructor(my_level)))
+ Coalescer_name.append(eval(Coalescer_constructor(my_level)))
+
+def config_tlb_hierarchy(options, system, shader_idx):
+ n_cu = options.num_compute_units
+ # Make this configurable now, instead of the hard coded val. The dispatcher
+ # is always the last item in the system.cpu list.
+ dispatcher_idx = len(system.cpu) - 1
+
+ if options.TLB_config == "perLane":
+ num_TLBs = 64 * n_cu
+ elif options.TLB_config == "mono":
+ num_TLBs = 1
+ elif options.TLB_config == "perCU":
+ num_TLBs = n_cu
+ elif options.TLB_config == "2CU":
+ num_TLBs = n_cu >> 1
+ else:
+ print "Bad option for TLB Configuration."
+ sys.exit(1)
+
+ #----------------------------------------------------------------------------------------
+ # A visual representation of the TLB hierarchy
+ # for ease of configuration
+ # < Modify here the width and the number of levels if you want a different configuration >
+ # width is the number of TLBs of the given type (i.e., D-TLB, I-TLB etc) for this level
+ L1 = [{'name': 'sqc', 'width': options.num_sqc, 'TLBarray': [], 'CoalescerArray': []},
+ {'name': 'dispatcher', 'width': 1, 'TLBarray': [], 'CoalescerArray': []},
+ {'name': 'l1', 'width': num_TLBs, 'TLBarray': [], 'CoalescerArray': []}]
+
+ L2 = [{'name': 'l2', 'width': 1, 'TLBarray': [], 'CoalescerArray': []}]
+ L3 = [{'name': 'l3', 'width': 1, 'TLBarray': [], 'CoalescerArray': []}]
+
+ TLB_hierarchy = [L1, L2, L3]
+
+ #----------------------------------------------------------------------------------------
+    # Create the hierarchy
+ # Call the appropriate constructors and add objects to the system
+
+ for i in xrange(len(TLB_hierarchy)):
+ hierarchy_level = TLB_hierarchy[i]
+ level = i+1
+ for TLB_type in hierarchy_level:
+ TLB_index = TLB_type['width']
+ TLB_array = TLB_type['TLBarray']
+ Coalescer_array = TLB_type['CoalescerArray']
+ # If the sim calls for a fixed L1 TLB size across CUs,
+ # override the TLB entries option
+ if options.tot_L1TLB_size:
+ options.L1TLBentries = options.tot_L1TLB_size / num_TLBs
+ if options.L1TLBassoc > options.L1TLBentries:
+ options.L1TLBassoc = options.L1TLBentries
+ # call the constructors for the TLB and the Coalescer
+ create_TLB_Coalescer(options, level, TLB_index,\
+ TLB_array, Coalescer_array)
+
+ system_TLB_name = TLB_type['name'] + '_tlb'
+ system_Coalescer_name = TLB_type['name'] + '_coalescer'
+
+ # add the different TLB levels to the system
+ # Modify here if you want to make the TLB hierarchy a child of
+ # the shader.
+ exec('system.%s = TLB_array' % system_TLB_name)
+ exec('system.%s = Coalescer_array' % system_Coalescer_name)
+
+ #===========================================================
+ # Specify the TLB hierarchy (i.e., port connections)
+ # All TLBs but the last level TLB need to have a memSidePort (master)
+ #===========================================================
+
+ # Each TLB is connected with its Coalescer through a single port.
+ # There is a one-to-one mapping of TLBs to Coalescers at a given level
+ # This won't be modified no matter what the hierarchy looks like.
+ for i in xrange(len(TLB_hierarchy)):
+ hierarchy_level = TLB_hierarchy[i]
+ level = i+1
+ for TLB_type in hierarchy_level:
+ name = TLB_type['name']
+ for index in range(TLB_type['width']):
+ exec('system.%s_coalescer[%d].master[0] = \
+ system.%s_tlb[%d].slave[0]' % \
+ (name, index, name, index))
+
+ # Connect the cpuSidePort (slave) of all the coalescers in level 1
+ # < Modify here if you want a different configuration >
+ for TLB_type in L1:
+ name = TLB_type['name']
+ num_TLBs = TLB_type['width']
+ if name == 'l1': # L1 D-TLBs
+ tlb_per_cu = num_TLBs / n_cu
+ for cu_idx in range(n_cu):
+ if tlb_per_cu:
+ for tlb in range(tlb_per_cu):
+ exec('system.cpu[%d].CUs[%d].translation_port[%d] = \
+ system.l1_coalescer[%d].slave[%d]' % \
+ (shader_idx, cu_idx, tlb, cu_idx*tlb_per_cu+tlb, 0))
+ else:
+ exec('system.cpu[%d].CUs[%d].translation_port[%d] = \
+ system.l1_coalescer[%d].slave[%d]' % \
+ (shader_idx, cu_idx, tlb_per_cu, cu_idx / (n_cu / num_TLBs), cu_idx % (n_cu / num_TLBs)))
+
+ elif name == 'dispatcher': # Dispatcher TLB
+ for index in range(TLB_type['width']):
+ exec('system.cpu[%d].translation_port = \
+ system.dispatcher_coalescer[%d].slave[0]' % \
+ (dispatcher_idx, index))
+ elif name == 'sqc': # I-TLB
+ for index in range(n_cu):
+ sqc_tlb_index = index / options.cu_per_sqc
+ sqc_tlb_port_id = index % options.cu_per_sqc
+ exec('system.cpu[%d].CUs[%d].sqc_tlb_port = \
+ system.sqc_coalescer[%d].slave[%d]' % \
+ (shader_idx, index, sqc_tlb_index, sqc_tlb_port_id))
+
+
+ # Connect the memSidePorts (masters) of all the TLBs with the
+ # cpuSidePorts (slaves) of the Coalescers of the next level
+ # < Modify here if you want a different configuration >
+ # L1 <-> L2
+ l2_coalescer_index = 0
+ for TLB_type in L1:
+ name = TLB_type['name']
+ for index in range(TLB_type['width']):
+ exec('system.%s_tlb[%d].master[0] = \
+ system.l2_coalescer[0].slave[%d]' % \
+ (name, index, l2_coalescer_index))
+ l2_coalescer_index += 1
+ # L2 <-> L3
+ system.l2_tlb[0].master[0] = system.l3_coalescer[0].slave[0]
+
+ return system
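A minimal sketch (illustration only, not part of this patch) of what eval(TLB_constructor(1)) effectively evaluates to for a level-1 TLB, assuming the level-1 option names registered in GPUTLBOptions.py and an options object produced by the parser:

    # Hypothetical expansion of the %(level)d format string for level 1
    l1_tlb = X86GPUTLB(size = options.L1TLBentries,
                       assoc = options.L1TLBassoc,
                       hitLatency = options.L1AccessLatency,
                       missLatency2 = options.L1MissLatency,
                       maxOutstandingReqs = options.L1MaxOutstandingReqs,
                       accessDistance = options.L1AccessDistanceStat,
                       clk_domain = SrcClockDomain(
                           clock = options.GPUClock,
                           voltage_domain = VoltageDomain(
                               voltage = options.gpu_voltage)))

The string-building/eval approach lets one constructor serve all three levels by substituting the level number into the option names.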
diff --git a/configs/common/GPUTLBOptions.py b/configs/common/GPUTLBOptions.py
new file mode 100644
index 000000000..40a46d560
--- /dev/null
+++ b/configs/common/GPUTLBOptions.py
@@ -0,0 +1,109 @@
+#
+# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Myrto Papadopoulou
+#
+
+def tlb_options(parser):
+
+ #===================================================================
+ # TLB Configuration
+ #===================================================================
+
+ parser.add_option("--TLB-config", type="string", default="perCU",
+ help="Options are: perCU (default), mono, 2CU, or perLane")
+
+ #===================================================================
+ # L1 TLB Options (D-TLB, I-TLB, Dispatcher-TLB)
+ #===================================================================
+
+ parser.add_option("--L1TLBentries", type='int', default="32")
+ parser.add_option("--L1TLBassoc", type='int', default="32")
+ parser.add_option("--L1AccessLatency", type='int', default="1",
+ help="latency in gpu cycles")
+ parser.add_option("--L1MissLatency", type='int', default="750",
+ help="latency (in gpu cycles) of a page walk, "
+ "if this is a last level TLB")
+ parser.add_option("--L1MaxOutstandingReqs", type='int', default="64")
+ parser.add_option("--L1AccessDistanceStat", action="store_true")
+ parser.add_option("--tot-L1TLB-size", type="int", default="0")
+
+ #===================================================================
+ # L2 TLB Options
+ #===================================================================
+
+ parser.add_option("--L2TLBentries", type='int', default="4096")
+ parser.add_option("--L2TLBassoc", type='int', default="32")
+ parser.add_option("--L2AccessLatency", type='int', default="69",
+ help="latency in gpu cycles")
+ parser.add_option("--L2MissLatency", type='int', default="750",
+ help="latency (in gpu cycles) of a page walk, "
+ "if this is a last level TLB")
+ parser.add_option("--L2MaxOutstandingReqs", type='int', default="64")
+ parser.add_option("--L2AccessDistanceStat", action="store_true")
+
+ #===================================================================
+ # L3 TLB Options
+ #===================================================================
+
+ parser.add_option("--L3TLBentries", type='int', default="8192")
+ parser.add_option("--L3TLBassoc", type='int', default="32")
+ parser.add_option("--L3AccessLatency", type='int', default="150",
+ help="latency in gpu cycles")
+ parser.add_option("--L3MissLatency", type='int', default="750",
+ help="latency (in gpu cycles) of a page walk")
+ parser.add_option("--L3MaxOutstandingReqs", type='int', default="64")
+ parser.add_option("--L3AccessDistanceStat", action="store_true")
+
+ #===================================================================
+ # L1 TLBCoalescer Options
+ #===================================================================
+
+ parser.add_option("--L1ProbesPerCycle", type='int', default="2")
+ parser.add_option("--L1CoalescingWindow", type='int', default="1")
+ parser.add_option("--L1DisableCoalescing", action="store_true")
+
+ #===================================================================
+ # L2 TLBCoalescer Options
+ #===================================================================
+
+ parser.add_option("--L2ProbesPerCycle", type='int', default="2")
+ parser.add_option("--L2CoalescingWindow", type='int', default="1")
+ parser.add_option("--L2DisableCoalescing", action="store_true")
+
+ #===================================================================
+ # L3 TLBCoalescer Options
+ #===================================================================
+
+ parser.add_option("--L3ProbesPerCycle", type='int', default="2")
+ parser.add_option("--L3CoalescingWindow", type='int', default="1")
+ parser.add_option("--L3DisableCoalescing", action="store_true")
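The per-level option names above (L1TLBentries, L2AccessLatency, and so on) must stay consistent with the "L%(level)d..." format strings in GPUTLBConfig.py, which builds the attribute names at run time. A minimal sketch (illustration only, not part of this patch) of registering and inspecting these options, assuming configs/common is on the Python path:

    import optparse

    import GPUTLBOptions

    parser = optparse.OptionParser()
    GPUTLBOptions.tlb_options(parser)
    (options, args) = parser.parse_args([])
    # Declared defaults: 32-entry L1, 4096-entry L2, 8192-entry L3 TLBs
    print options.L1TLBentries, options.L2TLBentries, options.L3TLBentries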
diff --git a/configs/example/apu_se.py b/configs/example/apu_se.py
new file mode 100644
index 000000000..75819b505
--- /dev/null
+++ b/configs/example/apu_se.py
@@ -0,0 +1,499 @@
+#
+# Copyright (c) 2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Sooraj Puthoor
+#
+
+import optparse, os, re, sys
+import math
+import glob
+import inspect
+
+import m5
+from m5.objects import *
+from m5.util import addToPath
+
+addToPath('../ruby')
+addToPath('../common')
+addToPath('../topologies')
+
+import Options
+import Ruby
+import Simulation
+import GPUTLBOptions, GPUTLBConfig
+
+########################## Script Options ########################
+def setOption(parser, opt_str, value = 1):
+ # check to make sure the option actually exists
+ if not parser.has_option(opt_str):
+ raise Exception("cannot find %s in list of possible options" % opt_str)
+
+ opt = parser.get_option(opt_str)
+ # set the value
+ exec("parser.values.%s = %s" % (opt.dest, value))
+
+def getOption(parser, opt_str):
+ # check to make sure the option actually exists
+ if not parser.has_option(opt_str):
+ raise Exception("cannot find %s in list of possible options" % opt_str)
+
+ opt = parser.get_option(opt_str)
+ # get the value
+ exec("return_value = parser.values.%s" % opt.dest)
+ return return_value
+
+# Adding script options
+parser = optparse.OptionParser()
+Options.addCommonOptions(parser)
+Options.addSEOptions(parser)
+
+parser.add_option("--cpu-only-mode", action="store_true", default=False,
+ help="APU mode. Used to take care of problems in "\
+ "Ruby.py while running APU protocols")
+parser.add_option("-k", "--kernel-files",
+ help="file(s) containing GPU kernel code (colon separated)")
+parser.add_option("-u", "--num-compute-units", type="int", default=1,
+ help="number of GPU compute units"),
+parser.add_option("--num-cp", type="int", default=0,
+ help="Number of GPU Command Processors (CP)")
+parser.add_option("--benchmark-root", help="Root of benchmark directory tree")
+
+# not super important now, but to avoid putting the number 4 everywhere, make
+# it an option/knob
+parser.add_option("--cu-per-sqc", type="int", default=4, help="number of CUs" \
+ "sharing an SQC (icache, and thus icache TLB)")
+parser.add_option("--simds-per-cu", type="int", default=4, help="SIMD units" \
+ "per CU")
+parser.add_option("--wf-size", type="int", default=64,
+ help="Wavefront size(in workitems)")
+parser.add_option("--sp-bypass-path-length", type="int", default=4, \
+ help="Number of stages of bypass path in vector ALU for Single Precision ops")
+parser.add_option("--dp-bypass-path-length", type="int", default=4, \
+ help="Number of stages of bypass path in vector ALU for Double Precision ops")
+# issue period per SIMD unit: number of cycles before issuing another vector
+parser.add_option("--issue-period", type="int", default=4, \
+ help="Number of cycles per vector instruction issue period")
+parser.add_option("--glbmem-wr-bus-width", type="int", default=32, \
+ help="VGPR to Coalescer (Global Memory) data bus width in bytes")
+parser.add_option("--glbmem-rd-bus-width", type="int", default=32, \
+ help="Coalescer to VGPR (Global Memory) data bus width in bytes")
+# Currently we only support 1 local memory pipe
+parser.add_option("--shr-mem-pipes-per-cu", type="int", default=1, \
+ help="Number of Shared Memory pipelines per CU")
+# Currently we only support 1 global memory pipe
+parser.add_option("--glb-mem-pipes-per-cu", type="int", default=1, \
+ help="Number of Global Memory pipelines per CU")
+parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
+ "WF slots per SIMD")
+
+parser.add_option("--vreg-file-size", type="int", default=2048,
+ help="number of physical vector registers per SIMD")
+parser.add_option("--bw-scalor", type="int", default=0,
+ help="bandwidth scalor for scalability analysis")
+parser.add_option("--CPUClock", type="string", default="2GHz",
+ help="CPU clock")
+parser.add_option("--GPUClock", type="string", default="1GHz",
+ help="GPU clock")
+parser.add_option("--cpu-voltage", action="store", type="string",
+ default='1.0V',
+ help = """CPU voltage domain""")
+parser.add_option("--gpu-voltage", action="store", type="string",
+ default='1.0V',
+ help = """CPU voltage domain""")
+parser.add_option("--CUExecPolicy", type="string", default="OLDEST-FIRST",
+ help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)")
+parser.add_option("--xact-cas-mode", action="store_true",
+ help="enable load_compare mode (transactional CAS)")
+parser.add_option("--SegFaultDebug",action="store_true",
+ help="checks for GPU seg fault before TLB access")
+parser.add_option("--FunctionalTLB",action="store_true",
+ help="Assumes TLB has no latency")
+parser.add_option("--LocalMemBarrier",action="store_true",
+ help="Barrier does not wait for writethroughs to complete")
+parser.add_option("--countPages", action="store_true",
+ help="Count Page Accesses and output in per-CU output files")
+parser.add_option("--TLB-prefetch", type="int", help = "prefetch depth for"\
+ "TLBs")
+parser.add_option("--pf-type", type="string", help="type of prefetch: "\
+ "PF_CU, PF_WF, PF_PHASE, PF_STRIDE")
+parser.add_option("--pf-stride", type="int", help="set prefetch stride")
+parser.add_option("--numLdsBanks", type="int", default=32,
+ help="number of physical banks per LDS module")
+parser.add_option("--ldsBankConflictPenalty", type="int", default=1,
+ help="number of cycles per LDS bank conflict")
+
+
+Ruby.define_options(parser)
+
+#add TLB options to the parser
+GPUTLBOptions.tlb_options(parser)
+
+(options, args) = parser.parse_args()
+
+# The GPU cache coherence protocols only work with the backing store
+setOption(parser, "--access-backing-store")
+
+# if benchmark root is specified explicitly, that overrides the search path
+if options.benchmark_root:
+ benchmark_path = [options.benchmark_root]
+else:
+ # Set default benchmark search path to current dir
+ benchmark_path = ['.']
+
+########################## Sanity Check ########################
+
+# Currently the gpu model requires ruby
+if buildEnv['PROTOCOL'] == 'None':
+ fatal("GPU model requires ruby")
+
+# Currently the gpu model requires only timing or detailed CPU
+if not (options.cpu_type == "timing" or
+ options.cpu_type == "detailed"):
+ fatal("GPU model requires timing or detailed CPU")
+
+# This file can support multiple compute units
+assert(options.num_compute_units >= 1)
+
+# Currently, the SQC (the GPU's I-cache) is shared by multiple compute
+# units (CUs). The protocol works just fine even if the SQC is not shared.
+# Override this option here so that the user need not set it explicitly
+# (assuming a shared SQC is the common usage).
+n_cu = options.num_compute_units
+num_sqc = int(math.ceil(float(n_cu) / options.cu_per_sqc))
+options.num_sqc = num_sqc # pass this to Ruby
+
+########################## Creating the GPU system ########################
+# shader is the GPU
+shader = Shader(n_wf = options.wfs_per_simd,
+ clk_domain = SrcClockDomain(
+ clock = options.GPUClock,
+ voltage_domain = VoltageDomain(
+ voltage = options.gpu_voltage)))
+
+# GPU_RfO (Read For Ownership) implements an SC/TSO memory model.
+# The other GPU protocols implement release consistency on the GPU side,
+# so all GPU protocols other than GPU_RfO must make their writes visible
+# to global memory and read from global memory at kernel boundaries.
+# The pipeline initiates (or does not initiate) the acquire/release
+# operations depending on the impl_kern_boundary_sync flag: when the flag
+# is true, the pipeline initiates an acquire/release operation at each
+# kernel boundary.
+if buildEnv['PROTOCOL'] == 'GPU_RfO':
+ shader.impl_kern_boundary_sync = False
+else:
+ shader.impl_kern_boundary_sync = True
+
+# Switching off per-lane TLB by default
+per_lane = False
+if options.TLB_config == "perLane":
+ per_lane = True
+
+# List of compute units; one GPU can have multiple compute units
+compute_units = []
+for i in xrange(n_cu):
+ compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
+ num_SIMDs = options.simds_per_cu,
+ wfSize = options.wf_size,
+ spbypass_pipe_length = options.sp_bypass_path_length,
+ dpbypass_pipe_length = options.dp_bypass_path_length,
+ issue_period = options.issue_period,
+ coalescer_to_vrf_bus_width = \
+ options.glbmem_rd_bus_width,
+ vrf_to_coalescer_bus_width = \
+ options.glbmem_wr_bus_width,
+ num_global_mem_pipes = \
+ options.glb_mem_pipes_per_cu,
+ num_shared_mem_pipes = \
+ options.shr_mem_pipes_per_cu,
+ n_wf = options.wfs_per_simd,
+ execPolicy = options.CUExecPolicy,
+ xactCasMode = options.xact_cas_mode,
+ debugSegFault = options.SegFaultDebug,
+ functionalTLB = options.FunctionalTLB,
+ localMemBarrier = options.LocalMemBarrier,
+ countPages = options.countPages,
+ localDataStore = \
+ LdsState(banks = options.numLdsBanks,
+ bankConflictPenalty = \
+ options.ldsBankConflictPenalty)))
+ wavefronts = []
+ vrfs = []
+ for j in xrange(options.simds_per_cu):
+ for k in xrange(shader.n_wf):
+ wavefronts.append(Wavefront(simdId = j, wf_slot_id = k))
+ vrfs.append(VectorRegisterFile(simd_id=j,
+ num_regs_per_simd=options.vreg_file_size))
+ compute_units[-1].wavefronts = wavefronts
+ compute_units[-1].vector_register_file = vrfs
+ if options.TLB_prefetch:
+ compute_units[-1].prefetch_depth = options.TLB_prefetch
+ compute_units[-1].prefetch_prev_type = options.pf_type
+
+ # attach the LDS and the CU to the bus (actually a Bridge)
+ compute_units[-1].ldsPort = compute_units[-1].ldsBus.slave
+ compute_units[-1].ldsBus.master = compute_units[-1].localDataStore.cuPort
+
+# Attach compute units to GPU
+shader.CUs = compute_units
+
+########################## Creating the CPU system ########################
+
+# The shader core will be whatever is after the CPU cores are accounted for
+shader_idx = options.num_cpus
+
+# The command processor will be whatever is after the shader is accounted for
+cp_idx = shader_idx + 1
+cp_list = []
+
+# List of CPUs
+cpu_list = []
+
+# We only support timing mode for shader and memory
+shader.timing = True
+mem_mode = 'timing'
+
+# create the cpus
+for i in range(options.num_cpus):
+ cpu = None
+ if options.cpu_type == "detailed":
+ cpu = DerivO3CPU(cpu_id=i,
+ clk_domain = SrcClockDomain(
+ clock = options.CPUClock,
+ voltage_domain = VoltageDomain(
+ voltage = options.cpu_voltage)))
+ elif options.cpu_type == "timing":
+ cpu = TimingSimpleCPU(cpu_id=i,
+ clk_domain = SrcClockDomain(
+ clock = options.CPUClock,
+ voltage_domain = VoltageDomain(
+ voltage = options.cpu_voltage)))
+ else:
+ fatal("Atomic CPU not supported/tested")
+ cpu_list.append(cpu)
+
+# create the command processors
+for i in xrange(options.num_cp):
+ cp = None
+ if options.cpu_type == "detailed":
+ cp = DerivO3CPU(cpu_id = options.num_cpus + i,
+ clk_domain = SrcClockDomain(
+ clock = options.CPUClock,
+ voltage_domain = VoltageDomain(
+ voltage = options.cpu_voltage)))
+ elif options.cpu_type == 'timing':
+ cp = TimingSimpleCPU(cpu_id=options.num_cpus + i,
+ clk_domain = SrcClockDomain(
+ clock = options.CPUClock,
+ voltage_domain = VoltageDomain(
+ voltage = options.cpu_voltage)))
+ else:
+ fatal("Atomic CPU not supported/tested")
+ cp_list = cp_list + [cp]
+
+########################## Creating the GPU dispatcher ########################
+# Dispatcher dispatches work from host CPU to GPU
+host_cpu = cpu_list[0]
+dispatcher = GpuDispatcher()
+
+########################## Create and assign the workload ########################
+# Check for rel_path in elements of base_list using test, returning
+# the first full path that satisfies test
+def find_path(base_list, rel_path, test):
+ for base in base_list:
+ if not base:
+ # base could be None if environment var not set
+ continue
+ full_path = os.path.join(base, rel_path)
+ if test(full_path):
+ return full_path
+ fatal("%s not found in %s" % (rel_path, base_list))
+
+def find_file(base_list, rel_path):
+ return find_path(base_list, rel_path, os.path.isfile)
+
+executable = find_path(benchmark_path, options.cmd, os.path.exists)
+# it's common for a benchmark to be in a directory with the same
+# name as the executable, so we handle that automatically
+if os.path.isdir(executable):
+ benchmark_path = [executable]
+ executable = find_file(benchmark_path, options.cmd)
+if options.kernel_files:
+ kernel_files = [find_file(benchmark_path, f)
+ for f in options.kernel_files.split(':')]
+else:
+ # if kernel_files is not set, see if there's a unique .asm file
+ # in the same directory as the executable
+ kernel_path = os.path.dirname(executable)
+ kernel_files = glob.glob(os.path.join(kernel_path, '*.asm'))
+ if kernel_files:
+ print "Using GPU kernel code file(s)", ",".join(kernel_files)
+ else:
+ fatal("Can't locate kernel code (.asm) in " + kernel_path)
+
+# OpenCL driver
+driver = ClDriver(filename="hsa", codefile=kernel_files)
+for cpu in cpu_list:
+ cpu.workload = LiveProcess(executable = executable,
+ cmd = [options.cmd] + options.options.split(),
+ drivers = [driver])
+for cp in cp_list:
+ cp.workload = host_cpu.workload
+
+########################## Create the overall system ########################
+# Full list of processing cores in the system. Note that
+# dispatcher is also added to cpu_list although it is
+# not a processing element
+cpu_list = cpu_list + [shader] + cp_list + [dispatcher]
+
+# creating the overall system
+# notice the cpu list is explicitly added as a parameter to System
+system = System(cpu = cpu_list,
+ mem_ranges = [AddrRange(options.mem_size)],
+ cache_line_size = options.cacheline_size,
+ mem_mode = mem_mode)
+system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
+system.clk_domain = SrcClockDomain(clock = options.sys_clock,
+ voltage_domain = system.voltage_domain)
+
+# configure the TLB hierarchy
+GPUTLBConfig.config_tlb_hierarchy(options, system, shader_idx)
+
+# create Ruby system
+system.piobus = IOXBar(width=32, response_latency=0,
+ frontend_latency=0, forward_latency=0)
+Ruby.create_system(options, None, system)
+system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
+ voltage_domain = system.voltage_domain)
+
+# attach the CPU ports to Ruby
+for i in range(options.num_cpus):
+ ruby_port = system.ruby._cpu_ports[i]
+
+ # Create interrupt controller
+ system.cpu[i].createInterruptController()
+
+ # Connect cache port's to ruby
+ system.cpu[i].icache_port = ruby_port.slave
+ system.cpu[i].dcache_port = ruby_port.slave
+
+ ruby_port.mem_master_port = system.piobus.slave
+ if buildEnv['TARGET_ISA'] == "x86":
+ system.cpu[i].interrupts[0].pio = system.piobus.master
+ system.cpu[i].interrupts[0].int_master = system.piobus.slave
+ system.cpu[i].interrupts[0].int_slave = system.piobus.master
+
+# attach CU ports to Ruby
+# Because of the peculiarities of the CP core, you may have 1 CPU but 2
+# sequencers and thus 2 _cpu_ports created. Your GPUs shouldn't be
+# hooked up until after the CP. To make this script generic, figure out
+# the index as below, but note that this assumes there is one sequencer
+# per compute unit and one sequencer per SQC for the math to work out
+# correctly.
+gpu_port_idx = len(system.ruby._cpu_ports) \
+ - options.num_compute_units - options.num_sqc
+gpu_port_idx = gpu_port_idx - options.num_cp * 2
+
+wavefront_size = options.wf_size
+for i in xrange(n_cu):
+ # The pipeline issues wavefront_size number of uncoalesced requests
+ # in one GPU issue cycle. Hence wavefront_size mem ports.
+ for j in xrange(wavefront_size):
+ system.cpu[shader_idx].CUs[i].memory_port[j] = \
+ system.ruby._cpu_ports[gpu_port_idx].slave[j]
+ gpu_port_idx += 1
+
+for i in xrange(n_cu):
+ if i > 0 and not i % options.cu_per_sqc:
+ print "incrementing idx on ", i
+ gpu_port_idx += 1
+ system.cpu[shader_idx].CUs[i].sqc_port = \
+ system.ruby._cpu_ports[gpu_port_idx].slave
+gpu_port_idx = gpu_port_idx + 1
+
+# attach CP ports to Ruby
+for i in xrange(options.num_cp):
+ system.cpu[cp_idx].createInterruptController()
+ system.cpu[cp_idx].dcache_port = \
+ system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
+ system.cpu[cp_idx].icache_port = \
+ system.ruby._cpu_ports[gpu_port_idx + i * 2 + 1].slave
+ system.cpu[cp_idx].interrupts[0].pio = system.piobus.master
+ system.cpu[cp_idx].interrupts[0].int_master = system.piobus.slave
+ system.cpu[cp_idx].interrupts[0].int_slave = system.piobus.master
+ cp_idx = cp_idx + 1
+
+# connect dispatcher to the system.piobus
+dispatcher.pio = system.piobus.master
+dispatcher.dma = system.piobus.slave
+
+################# Connect the CPU and GPU via GPU Dispatcher ###################
+# The CPU rings the GPU doorbell to notify a pending task using this
+# interface, and the GPU uses this interface to notify the CPU of task
+# completion. The communication happens through the emulated driver.
+
+# Note this implicit setting of the cpu_pointer, shader_pointer and tlb array
+# parameters must be after the explicit setting of the System cpu list
+shader.cpu_pointer = host_cpu
+dispatcher.cpu = host_cpu
+dispatcher.shader_pointer = shader
+dispatcher.cl_driver = driver
+
+########################## Start simulation ########################
+
+root = Root(system=system, full_system=False)
+m5.ticks.setGlobalFrequency('1THz')
+if options.abs_max_tick:
+ maxtick = options.abs_max_tick
+else:
+ maxtick = m5.MaxTick
+
+# Benchmarks support work item annotations
+Simulation.setWorkCountOptions(system, options)
+
+# Checkpointing is not supported by APU model
+if (options.checkpoint_dir != None or
+ options.checkpoint_restore != None):
+ fatal("Checkpointing not supported by apu model")
+
+checkpoint_dir = None
+m5.instantiate(checkpoint_dir)
+
+# Map workload to this address space
+host_cpu.workload[0].map(0x10000000, 0x200000000, 4096)
+
+exit_event = m5.simulate(maxtick)
+print "Ticks:", m5.curTick()
+print 'Exiting because ', exit_event.getCause()
+sys.exit(exit_event.getCode())
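A small worked example (illustration only, not part of this patch) of the SQC and Ruby port bookkeeping used above: with 8 compute units and the default of 4 CUs per SQC there are 2 SQCs, and the first GPU sequencer port sits num_compute_units + num_sqc + 2 * num_cp entries from the end of system.ruby._cpu_ports, assuming one sequencer per CU and one per SQC as the comment in the script states:

    import math

    n_cu, cu_per_sqc, num_cp = 8, 4, 0
    num_sqc = int(math.ceil(float(n_cu) / cu_per_sqc))   # = 2, as in the script
    # Offset of the first CU sequencer, counted from the end of _cpu_ports
    gpu_ports_from_end = n_cu + num_sqc + 2 * num_cp      # = 10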
diff --git a/configs/example/ruby_gpu_random_test.py b/configs/example/ruby_gpu_random_test.py
new file mode 100644
index 000000000..66ee4675f
--- /dev/null
+++ b/configs/example/ruby_gpu_random_test.py
@@ -0,0 +1,187 @@
+#
+# Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Brad Beckmann
+#
+
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from m5.util import addToPath
+import os, optparse, sys
+addToPath('../common')
+addToPath('../ruby')
+addToPath('../topologies')
+
+import Options
+import Ruby
+
+# Get paths we might need.
+config_path = os.path.dirname(os.path.abspath(__file__))
+config_root = os.path.dirname(config_path)
+m5_root = os.path.dirname(config_root)
+
+parser = optparse.OptionParser()
+Options.addCommonOptions(parser)
+
+parser.add_option("--maxloads", metavar="N", default=100,
+ help="Stop after N loads")
+parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
+ help="Wakeup every N cycles")
+parser.add_option("-u", "--num-compute-units", type="int", default=1,
+ help="number of compute units in the GPU")
+parser.add_option("--numCPs", type="int", default=0,
+ help="Number of GPU Command Processors (CP)")
+# not super important now, but to avoid putting the number 4 everywhere, make
+# it an option/knob
+parser.add_option("--cu-per-sqc", type="int", default=4, help="number of CUs \
+ sharing an SQC (icache, and thus icache TLB)")
+parser.add_option("--simds-per-cu", type="int", default=4, help="SIMD units" \
+ "per CU")
+parser.add_option("--wf-size", type="int", default=64,
+ help="Wavefront size(in workitems)")
+parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
+ "WF slots per SIMD")
+
+#
+# Add the ruby specific and protocol specific options
+#
+Ruby.define_options(parser)
+
+execfile(os.path.join(config_root, "common", "Options.py"))
+
+(options, args) = parser.parse_args()
+
+#
+# Set the default cache size and associativity to be very small to encourage
+# races between requests and writebacks.
+#
+options.l1d_size="256B"
+options.l1i_size="256B"
+options.l2_size="512B"
+options.l3_size="1kB"
+options.l1d_assoc=2
+options.l1i_assoc=2
+options.l2_assoc=2
+options.l3_assoc=2
+
+# This file can support multiple compute units
+assert(options.num_compute_units >= 1)
+n_cu = options.num_compute_units
+
+options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc)
+
+if args:
+ print "Error: script doesn't take any positional arguments"
+ sys.exit(1)
+
+#
+# Create the ruby random tester
+#
+
+# Check for the GPU_RfO protocol. Other GPU protocols are non-SC and will
+# not work with the Ruby random tester.
+assert(buildEnv['PROTOCOL'] == 'GPU_RfO')
+
+# The GPU_RfO protocol does not support cache flushes
+check_flush = False
+
+tester = RubyTester(check_flush=check_flush,
+ checks_to_complete=options.maxloads,
+ wakeup_frequency=options.wakeup_freq,
+ deadlock_threshold=1000000)
+
+#
+# Create the M5 system. Note that the Memory Object isn't
+# actually used by the rubytester, but is included to support the
+# M5 memory size == Ruby memory size checks
+#
+system = System(cpu=tester, mem_ranges=[AddrRange(options.mem_size)])
+
+# Create a top-level voltage domain and clock domain
+system.voltage_domain = VoltageDomain(voltage=options.sys_voltage)
+
+system.clk_domain = SrcClockDomain(clock=options.sys_clock,
+ voltage_domain=system.voltage_domain)
+
+Ruby.create_system(options, False, system)
+
+# Create a separate clock domain for Ruby
+system.ruby.clk_domain = SrcClockDomain(clock=options.ruby_clock,
+ voltage_domain=system.voltage_domain)
+
+tester.num_cpus = len(system.ruby._cpu_ports)
+
+#
+# The tester is most effective when randomization is turned on and
+# artificial delay is randomly inserted on messages
+#
+system.ruby.randomization = True
+
+for ruby_port in system.ruby._cpu_ports:
+
+ #
+ # Tie the ruby tester ports to the ruby cpu read and write ports
+ #
+ if ruby_port.support_data_reqs and ruby_port.support_inst_reqs:
+ tester.cpuInstDataPort = ruby_port.slave
+ elif ruby_port.support_data_reqs:
+ tester.cpuDataPort = ruby_port.slave
+ elif ruby_port.support_inst_reqs:
+ tester.cpuInstPort = ruby_port.slave
+
+ # Do not automatically retry stalled Ruby requests
+ ruby_port.no_retry_on_stall = True
+
+ #
+ # Tell each sequencer this is the ruby tester so that it
+ # copies the subblock back to the checker
+ #
+ ruby_port.using_ruby_tester = True
+
+# -----------------------
+# run simulation
+# -----------------------
+
+root = Root( full_system = False, system = system )
+root.system.mem_mode = 'timing'
+
+# Not much point in this being higher than the L1 latency
+m5.ticks.setGlobalFrequency('1ns')
+
+# instantiate configuration
+m5.instantiate()
+
+# simulate until program terminates
+exit_event = m5.simulate(options.abs_max_tick)
+
+print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
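The num_sqc expression above is the standard integer-ceiling idiom; a quick check (illustration only, not part of this patch) that it matches math.ceil for the default cu_per_sqc of 4:

    import math

    cu_per_sqc = 4
    for n_cu in (1, 4, 5, 8):
        # (n + d - 1) // d rounds up; // matches the Python 2 "/" used above
        assert (n_cu + cu_per_sqc - 1) // cu_per_sqc == \
               int(math.ceil(float(n_cu) / cu_per_sqc))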
diff --git a/configs/ruby/AMD_Base_Constructor.py b/configs/ruby/AMD_Base_Constructor.py
new file mode 100644
index 000000000..d13153e9a
--- /dev/null
+++ b/configs/ruby/AMD_Base_Constructor.py
@@ -0,0 +1,134 @@
+#
+# Copyright (c) 2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Sooraj Puthoor, Lisa Hsu
+#
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from m5.util import convert
+from CntrlBase import *
+from Cluster import Cluster
+
+#
+# Note: the L1 Cache latency is only used by the sequencer on fast path hits
+#
+class L1Cache(RubyCache):
+ latency = 1
+ resourceStalls = False
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+#
+# Note: the L2 Cache latency is not currently used
+#
+class L2Cache(RubyCache):
+ latency = 10
+ resourceStalls = False
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+class CPCntrl(AMD_Base_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.cntrl_id = self.cntrlCount()
+
+ self.L1Icache = L1Cache()
+ self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
+ self.L1D0cache = L1Cache()
+ self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L1D1cache = L1Cache()
+ self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L2cache = L2Cache()
+ self.L2cache.create(options.l2_size, options.l2_assoc, options)
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1Icache
+ self.sequencer.dcache = self.L1D0cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.coreid = 0
+ self.sequencer.is_cpu_sequencer = True
+
+ self.sequencer1 = RubySequencer()
+ self.sequencer1.version = self.seqCount()
+ self.sequencer1.icache = self.L1Icache
+ self.sequencer1.dcache = self.L1D1cache
+ self.sequencer1.ruby_system = ruby_system
+ self.sequencer1.coreid = 1
+ self.sequencer1.is_cpu_sequencer = True
+
+ self.issue_latency = options.cpu_to_dir_latency
+ self.send_evictions = send_evicts(options)
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+def define_options(parser):
+ parser.add_option("--cpu-to-dir-latency", type="int", default=15)
+
+def construct(options, system, ruby_system):
+    if buildEnv['PROTOCOL'] not in ('GPU_VIPER', 'GPU_VIPER_Region',
+                                    'GPU_VIPER_Baseline'):
+ panic("This script requires VIPER based protocols \
+ to be built.")
+ cpu_sequencers = []
+ cpuCluster = None
+ cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s
+ for i in xrange((options.num_cpus + 1) / 2):
+
+ cp_cntrl = CPCntrl()
+ cp_cntrl.create(options, ruby_system, system)
+
+ # Connect the CP controllers to the ruby network
+ cp_cntrl.requestFromCore = ruby_system.network.slave
+ cp_cntrl.responseFromCore = ruby_system.network.slave
+ cp_cntrl.unblockFromCore = ruby_system.network.slave
+ cp_cntrl.probeToCore = ruby_system.network.master
+ cp_cntrl.responseToCore = ruby_system.network.master
+
+ exec("system.cp_cntrl%d = cp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
+ cpuCluster.add(cp_cntrl)
+ return cpu_sequencers, cpuCluster
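construct() is intended to be called from the VIPER protocol scripts added by this patch (GPU_VIPER.py and friends). A rough sketch of the call site (illustration only), assuming options, system and ruby_system already exist:

    import AMD_Base_Constructor

    # Returns the CP sequencers and a CPU-side Cluster that the caller adds
    # to the Ruby topology alongside the GPU clusters.
    cpu_sequencers, cpu_cluster = \
        AMD_Base_Constructor.construct(options, system, ruby_system)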
diff --git a/configs/ruby/GPU_RfO.py b/configs/ruby/GPU_RfO.py
new file mode 100644
index 000000000..bb14252f3
--- /dev/null
+++ b/configs/ruby/GPU_RfO.py
@@ -0,0 +1,751 @@
+#
+# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Lisa Hsu
+#
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from Ruby import create_topology
+from Ruby import send_evicts
+
+from Cluster import Cluster
+from Crossbar import Crossbar
+
+class CntrlBase:
+ _seqs = 0
+ @classmethod
+ def seqCount(cls):
+ # Use SeqCount not class since we need global count
+ CntrlBase._seqs += 1
+ return CntrlBase._seqs - 1
+
+ _cntrls = 0
+ @classmethod
+ def cntrlCount(cls):
+ # Use CntlCount not class since we need global count
+ CntrlBase._cntrls += 1
+ return CntrlBase._cntrls - 1
+
+ _version = 0
+ @classmethod
+ def versionCount(cls):
+ cls._version += 1 # Use count for this particular type
+ return cls._version - 1
+
+class TccDirCache(RubyCache):
+ size = "512kB"
+ assoc = 16
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.tcc_size)
+ self.size.value += (options.num_compute_units *
+ (MemorySize(options.tcp_size).value) *
+ options.tcc_dir_factor) / long(options.num_tccs)
+ self.start_index_bit = math.log(options.cacheline_size, 2) + \
+ math.log(options.num_tccs, 2)
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L1DCache(RubyCache):
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.l1d_size)
+ self.assoc = options.l1d_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L1ICache(RubyCache):
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.l1i_size)
+ self.assoc = options.l1i_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L2Cache(RubyCache):
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.l2_size)
+ self.assoc = options.l2_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+
+class CPCntrl(CorePair_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1Icache = L1ICache()
+ self.L1Icache.create(options)
+ self.L1D0cache = L1DCache()
+ self.L1D0cache.create(options)
+ self.L1D1cache = L1DCache()
+ self.L1D1cache.create(options)
+ self.L2cache = L2Cache()
+ self.L2cache.create(options)
+
+ self.sequencer = RubySequencer()
+ self.sequencer.icache_hit_latency = 2
+ self.sequencer.dcache_hit_latency = 2
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1Icache
+ self.sequencer.dcache = self.L1D0cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.coreid = 0
+ self.sequencer.is_cpu_sequencer = True
+
+ self.sequencer1 = RubySequencer()
+ self.sequencer1.version = self.seqCount()
+ self.sequencer1.icache = self.L1Icache
+ self.sequencer1.dcache = self.L1D1cache
+ self.sequencer1.icache_hit_latency = 2
+ self.sequencer1.dcache_hit_latency = 2
+ self.sequencer1.ruby_system = ruby_system
+ self.sequencer1.coreid = 1
+ self.sequencer1.is_cpu_sequencer = True
+
+ self.issue_latency = options.cpu_to_dir_latency
+ self.send_evictions = send_evicts(options)
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCPCache(RubyCache):
+ assoc = 8
+ dataArrayBanks = 16
+ tagArrayBanks = 4
+ dataAccessLatency = 4
+ tagAccessLatency = 1
+ def create(self, options):
+ self.size = MemorySize(options.tcp_size)
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class TCPCntrl(TCP_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+ self.L1cache.create(options)
+
+ self.coalescer = RubyGPUCoalescer()
+ self.coalescer.version = self.seqCount()
+ self.coalescer.icache = self.L1cache
+ self.coalescer.dcache = self.L1cache
+ self.coalescer.ruby_system = ruby_system
+ self.coalescer.support_inst_reqs = False
+ self.coalescer.is_cpu_sequencer = False
+ self.coalescer.max_outstanding_requests = options.simds_per_cu * \
+ options.wfs_per_simd * \
+ options.wf_size
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.is_cpu_sequencer = True
+
+ self.use_seq_not_coal = False
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def createCP(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+ self.L1cache.create(options)
+
+ self.coalescer = RubyGPUCoalescer()
+ self.coalescer.version = self.seqCount()
+ self.coalescer.icache = self.L1cache
+ self.coalescer.dcache = self.L1cache
+ self.coalescer.ruby_system = ruby_system
+ self.coalescer.support_inst_reqs = False
+ self.coalescer.is_cpu_sequencer = False
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.is_cpu_sequencer = True
+
+ self.use_seq_not_coal = True
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class SQCCache(RubyCache):
+ size = "32kB"
+ assoc = 8
+ dataArrayBanks = 16
+ tagArrayBanks = 4
+ dataAccessLatency = 4
+ tagAccessLatency = 1
+ def create(self, options):
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class SQCCntrl(SQC_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = SQCCache()
+ self.L1cache.create(options)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+
+ self.sequencer = RubySequencer()
+
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.support_data_reqs = False
+ self.sequencer.is_cpu_sequencer = False
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def createCP(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = SQCCache()
+ self.L1cache.create(options)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+
+ self.sequencer = RubySequencer()
+
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.support_data_reqs = False
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+
+class TCC(RubyCache):
+ assoc = 16
+ dataAccessLatency = 8
+ tagAccessLatency = 2
+ resourceStalls = True
+ def create(self, options):
+ self.size = MemorySize(options.tcc_size)
+ self.size = self.size / options.num_tccs
+ self.dataArrayBanks = 256 / options.num_tccs #number of data banks
+ self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
+ if ((self.size.value / long(self.assoc)) < 128):
+ self.size.value = long(128 * self.assoc)
+ self.start_index_bit = math.log(options.cacheline_size, 2) + \
+ math.log(options.num_tccs, 2)
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class TCCCntrl(TCC_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L2cache = TCC()
+ self.L2cache.create(options)
+ self.l2_response_latency = options.TCC_latency
+
+ self.number_of_TBEs = 2048
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_tccdir, resp_to_tccdir,
+ tcc_unblock_to_tccdir, req_to_tcc,
+ probe_to_tcc, resp_to_tcc):
+ self.w_reqToTCCDir = req_to_tccdir
+ self.w_respToTCCDir = resp_to_tccdir
+ self.w_TCCUnblockToTCCDir = tcc_unblock_to_tccdir
+ self.w_reqToTCC = req_to_tcc
+ self.w_probeToTCC = probe_to_tcc
+ self.w_respToTCC = resp_to_tcc
+
+class TCCDirCntrl(TCCdir_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.directory = TccDirCache()
+ self.directory.create(options)
+
+ self.number_of_TBEs = 1024
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_tccdir, resp_to_tccdir,
+ tcc_unblock_to_tccdir, req_to_tcc,
+ probe_to_tcc, resp_to_tcc):
+ self.w_reqToTCCDir = req_to_tccdir
+ self.w_respToTCCDir = resp_to_tccdir
+ self.w_TCCUnblockToTCCDir = tcc_unblock_to_tccdir
+ self.w_reqToTCC = req_to_tcc
+ self.w_probeToTCC = probe_to_tcc
+ self.w_respToTCC = resp_to_tcc
+
+class L3Cache(RubyCache):
+ assoc = 8
+ dataArrayBanks = 256
+ tagArrayBanks = 256
+
+ def create(self, options, ruby_system, system):
+ self.size = MemorySize(options.l3_size)
+ self.size.value /= options.num_dirs
+ self.dataArrayBanks /= options.num_dirs
+ self.tagArrayBanks /= options.num_dirs
+ self.dataAccessLatency = options.l3_data_latency
+ self.tagAccessLatency = options.l3_tag_latency
+ self.resourceStalls = options.no_resource_stalls
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L3Cntrl(L3Cache_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L3cache = L3Cache()
+ self.L3cache.create(options, ruby_system, system)
+
+ self.l3_response_latency = max(self.L3cache.dataAccessLatency,
+ self.L3cache.tagAccessLatency)
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+class DirMem(RubyDirectoryMemory, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ phys_mem_size = AddrRange(options.mem_size).size()
+ mem_module_size = phys_mem_size / options.num_dirs
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+ self.size = dir_size
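+        # e.g. a 512MB --mem-size split across --num-dirs=2 gives each
+        # directory controller a 256MB slice (sizes here are illustrative).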
+
+class DirCntrl(Directory_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.response_latency = 30
+
+ self.directory = DirMem()
+ self.directory.create(options, ruby_system, system)
+
+ self.L3CacheMemory = L3Cache()
+ self.L3CacheMemory.create(options, ruby_system, system)
+
+ self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
+ self.L3CacheMemory.tagAccessLatency)
+
+ self.number_of_TBEs = options.num_tbes
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+
+
+def define_options(parser):
+ parser.add_option("--num-subcaches", type="int", default=4)
+ parser.add_option("--l3-data-latency", type="int", default=20)
+ parser.add_option("--l3-tag-latency", type="int", default=15)
+ parser.add_option("--cpu-to-dir-latency", type="int", default=15)
+ parser.add_option("--gpu-to-dir-latency", type="int", default=160)
+ parser.add_option("--no-resource-stalls", action="store_false",
+ default=True)
+ parser.add_option("--num-tbes", type="int", default=256)
+ parser.add_option("--l2-latency", type="int", default=50) # load to use
+ parser.add_option("--num-tccs", type="int", default=1,
+ help="number of TCC directories and banks in the GPU")
+ parser.add_option("--TCP_latency", type="int", default=4,
+ help="TCP latency")
+ parser.add_option("--TCC_latency", type="int", default=16,
+ help="TCC latency")
+    parser.add_option("--tcc-size", type='string', default='256kB',
+                      help="aggregate TCC size")
+ parser.add_option("--tcp-size", type='string', default='16kB',
+ help="tcp size")
+    parser.add_option("--tcc-dir-factor", type='int', default=4,
+                      help="TCCdir size = factor * (TCPs + TCC)")
+
+def create_system(options, full_system, system, dma_devices, ruby_system):
+ if buildEnv['PROTOCOL'] != 'GPU_RfO':
+ panic("This script requires the GPU_RfO protocol to be built.")
+
+ cpu_sequencers = []
+
+ #
+ # The ruby network creation expects the list of nodes in the system to be
+ # consistent with the NetDest list. Therefore the l1 controller nodes
+ # must be listed before the directory nodes and directory nodes before
+ # dma nodes, etc.
+ #
+ cp_cntrl_nodes = []
+ tcp_cntrl_nodes = []
+ sqc_cntrl_nodes = []
+ tcc_cntrl_nodes = []
+ tccdir_cntrl_nodes = []
+ dir_cntrl_nodes = []
+ l3_cntrl_nodes = []
+
+ #
+ # Must create the individual controllers before the network to ensure the
+ # controller constructors are called before the network constructor
+ #
+
+ TCC_bits = int(math.log(options.num_tccs, 2))
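+    # With the default --num-tccs=1 this is zero TCC-select bits; e.g.
+    # --num-tccs=4 would give the controllers two address bits for selecting
+    # a TCC bank.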
+
+ # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
+ # Clusters
+ mainCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
+ for i in xrange(options.num_dirs):
+
+ dir_cntrl = DirCntrl(TCC_select_num_bits = TCC_bits)
+ dir_cntrl.create(options, ruby_system, system)
+ dir_cntrl.number_of_TBEs = 2560 * options.num_compute_units
+ #Enough TBEs for all TCP TBEs
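+        # i.e. 2560 TBEs per TCP (matching the per-TCP number_of_TBEs set
+        # below) times the number of compute units.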
+
+ # Connect the Directory controller to the ruby network
+ dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
+ dir_cntrl.requestFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.responseFromCores = MessageBuffer()
+ dir_cntrl.responseFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.unblockFromCores = MessageBuffer()
+ dir_cntrl.unblockFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.probeToCore = MessageBuffer()
+ dir_cntrl.probeToCore.master = ruby_system.network.slave
+
+ dir_cntrl.responseToCore = MessageBuffer()
+ dir_cntrl.responseToCore.master = ruby_system.network.slave
+
+ dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.responseFromMemory = MessageBuffer()
+
+ exec("system.dir_cntrl%d = dir_cntrl" % i)
+ dir_cntrl_nodes.append(dir_cntrl)
+
+ mainCluster.add(dir_cntrl)
+
+ # For an odd number of CPUs, still create the right number of controllers
+ cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
+ for i in xrange((options.num_cpus + 1) / 2):
+
+ cp_cntrl = CPCntrl()
+ cp_cntrl.create(options, ruby_system, system)
+
+ exec("system.cp_cntrl%d = cp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
+
+ # Connect the CP controllers and the network
+ cp_cntrl.requestFromCore = MessageBuffer()
+ cp_cntrl.requestFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.responseFromCore = MessageBuffer()
+ cp_cntrl.responseFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.unblockFromCore = MessageBuffer()
+ cp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.probeToCore = MessageBuffer()
+ cp_cntrl.probeToCore.slave = ruby_system.network.master
+
+ cp_cntrl.responseToCore = MessageBuffer()
+ cp_cntrl.responseToCore.slave = ruby_system.network.master
+
+ cp_cntrl.mandatoryQueue = MessageBuffer()
+ cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ cpuCluster.add(cp_cntrl)
+
+ gpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
+
+ for i in xrange(options.num_compute_units):
+
+ tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
+ number_of_TBEs = 2560) # max outstanding requests
+ tcp_cntrl.create(options, ruby_system, system)
+
+ exec("system.tcp_cntrl%d = tcp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(tcp_cntrl.coalescer)
+ tcp_cntrl_nodes.append(tcp_cntrl)
+
+ # Connect the TCP controller to the ruby network
+ tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
+ tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.probeToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.mandatoryQueue = MessageBuffer()
+
+ gpuCluster.add(tcp_cntrl)
+
+ for i in xrange(options.num_sqc):
+
+ sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
+ sqc_cntrl.create(options, ruby_system, system)
+
+ exec("system.sqc_cntrl%d = sqc_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(sqc_cntrl.sequencer)
+
+ # Connect the SQC controller to the ruby network
+ sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.responseFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.unblockFromCore = MessageBuffer(ordered = True)
+ sqc_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.probeToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.mandatoryQueue = MessageBuffer()
+
+ # SQC also in GPU cluster
+ gpuCluster.add(sqc_cntrl)
+
+ for i in xrange(options.numCPs):
+
+ tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
+ number_of_TBEs = 2560) # max outstanding requests
+ tcp_cntrl.createCP(options, ruby_system, system)
+
+ exec("system.tcp_cntrl%d = tcp_cntrl" % (options.num_compute_units + i))
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(tcp_cntrl.sequencer)
+ tcp_cntrl_nodes.append(tcp_cntrl)
+
+ # Connect the TCP controller to the ruby network
+ tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
+ tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.probeToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.mandatoryQueue = MessageBuffer()
+
+ gpuCluster.add(tcp_cntrl)
+
+ sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
+ sqc_cntrl.createCP(options, ruby_system, system)
+
+ exec("system.sqc_cntrl%d = sqc_cntrl" % (options.num_compute_units + i))
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(sqc_cntrl.sequencer)
+
+ # Connect the SQC controller to the ruby network
+ sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.responseFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.unblockFromCore = MessageBuffer(ordered = True)
+ sqc_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.probeToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.mandatoryQueue = MessageBuffer()
+
+ # SQC also in GPU cluster
+ gpuCluster.add(sqc_cntrl)
+
+ for i in xrange(options.num_tccs):
+
+ tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
+ number_of_TBEs = options.num_compute_units * 2560)
+ #Enough TBEs for all TCP TBEs
+ tcc_cntrl.create(options, ruby_system, system)
+ tcc_cntrl_nodes.append(tcc_cntrl)
+
+ tccdir_cntrl = TCCDirCntrl(TCC_select_num_bits = TCC_bits,
+ number_of_TBEs = options.num_compute_units * 2560)
+ #Enough TBEs for all TCP TBEs
+ tccdir_cntrl.create(options, ruby_system, system)
+ tccdir_cntrl_nodes.append(tccdir_cntrl)
+
+ exec("system.tcc_cntrl%d = tcc_cntrl" % i)
+ exec("system.tccdir_cntrl%d = tccdir_cntrl" % i)
+
+ # connect all of the wire buffers between L3 and dirs up
+ req_to_tccdir = RubyWireBuffer()
+ resp_to_tccdir = RubyWireBuffer()
+ tcc_unblock_to_tccdir = RubyWireBuffer()
+ req_to_tcc = RubyWireBuffer()
+ probe_to_tcc = RubyWireBuffer()
+ resp_to_tcc = RubyWireBuffer()
+
+ tcc_cntrl.connectWireBuffers(req_to_tccdir, resp_to_tccdir,
+ tcc_unblock_to_tccdir, req_to_tcc,
+ probe_to_tcc, resp_to_tcc)
+ tccdir_cntrl.connectWireBuffers(req_to_tccdir, resp_to_tccdir,
+ tcc_unblock_to_tccdir, req_to_tcc,
+ probe_to_tcc, resp_to_tcc)
+
+ # Connect the TCC controller to the ruby network
+ tcc_cntrl.responseFromTCC = MessageBuffer(ordered = True)
+ tcc_cntrl.responseFromTCC.master = ruby_system.network.slave
+
+ tcc_cntrl.responseToTCC = MessageBuffer(ordered = True)
+ tcc_cntrl.responseToTCC.slave = ruby_system.network.master
+
+ # Connect the TCC Dir controller to the ruby network
+ tccdir_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tccdir_cntrl.requestFromTCP.slave = ruby_system.network.master
+
+ tccdir_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tccdir_cntrl.responseFromTCP.slave = ruby_system.network.master
+
+ tccdir_cntrl.unblockFromTCP = MessageBuffer(ordered = True)
+ tccdir_cntrl.unblockFromTCP.slave = ruby_system.network.master
+
+ tccdir_cntrl.probeToCore = MessageBuffer(ordered = True)
+ tccdir_cntrl.probeToCore.master = ruby_system.network.slave
+
+ tccdir_cntrl.responseToCore = MessageBuffer(ordered = True)
+ tccdir_cntrl.responseToCore.master = ruby_system.network.slave
+
+ tccdir_cntrl.probeFromNB = MessageBuffer()
+ tccdir_cntrl.probeFromNB.slave = ruby_system.network.master
+
+ tccdir_cntrl.responseFromNB = MessageBuffer()
+ tccdir_cntrl.responseFromNB.slave = ruby_system.network.master
+
+ tccdir_cntrl.requestToNB = MessageBuffer()
+ tccdir_cntrl.requestToNB.master = ruby_system.network.slave
+
+ tccdir_cntrl.responseToNB = MessageBuffer()
+ tccdir_cntrl.responseToNB.master = ruby_system.network.slave
+
+ tccdir_cntrl.unblockToNB = MessageBuffer()
+ tccdir_cntrl.unblockToNB.master = ruby_system.network.slave
+
+ tccdir_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ # TCC cntrls added to the GPU cluster
+ gpuCluster.add(tcc_cntrl)
+ gpuCluster.add(tccdir_cntrl)
+
+ # Assuming no DMA devices
+ assert(len(dma_devices) == 0)
+
+ # Add cpu/gpu clusters to main cluster
+ mainCluster.add(cpuCluster)
+ mainCluster.add(gpuCluster)
+
+ ruby_system.network.number_of_virtual_networks = 10
+
+ return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
diff --git a/configs/ruby/GPU_VIPER.py b/configs/ruby/GPU_VIPER.py
new file mode 100644
index 000000000..f1384c404
--- /dev/null
+++ b/configs/ruby/GPU_VIPER.py
@@ -0,0 +1,674 @@
+#
+# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Lisa Hsu
+#
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from Ruby import create_topology
+from Ruby import send_evicts
+
+from Cluster import Cluster
+from Crossbar import Crossbar
+
+class CntrlBase:
+ _seqs = 0
+ @classmethod
+ def seqCount(cls):
+        # Bump the CntrlBase counter, not the subclass's, so sequencer IDs
+        # are globally unique across all controller types.
+ CntrlBase._seqs += 1
+ return CntrlBase._seqs - 1
+
+ _cntrls = 0
+ @classmethod
+ def cntrlCount(cls):
+        # Bump the CntrlBase counter, not the subclass's, so the controller
+        # count is global across all controller types.
+ CntrlBase._cntrls += 1
+ return CntrlBase._cntrls - 1
+
+ _version = 0
+ @classmethod
+ def versionCount(cls):
+ cls._version += 1 # Use count for this particular type
+ return cls._version - 1
+
+class L1Cache(RubyCache):
+ resourceStalls = False
+ dataArrayBanks = 2
+ tagArrayBanks = 2
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L2Cache(RubyCache):
+ resourceStalls = False
+ assoc = 16
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class CPCntrl(CorePair_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1Icache = L1Cache()
+ self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
+ self.L1D0cache = L1Cache()
+ self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L1D1cache = L1Cache()
+ self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L2cache = L2Cache()
+ self.L2cache.create(options.l2_size, options.l2_assoc, options)
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1Icache
+ self.sequencer.dcache = self.L1D0cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.coreid = 0
+ self.sequencer.is_cpu_sequencer = True
+
+ self.sequencer1 = RubySequencer()
+ self.sequencer1.version = self.seqCount()
+ self.sequencer1.icache = self.L1Icache
+ self.sequencer1.dcache = self.L1D1cache
+ self.sequencer1.ruby_system = ruby_system
+ self.sequencer1.coreid = 1
+ self.sequencer1.is_cpu_sequencer = True
+
+ self.issue_latency = options.cpu_to_dir_latency
+ self.send_evictions = send_evicts(options)
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCPCache(RubyCache):
+ size = "16kB"
+ assoc = 16
+ dataArrayBanks = 16 #number of data banks
+ tagArrayBanks = 16 #number of tag banks
+ dataAccessLatency = 4
+ tagAccessLatency = 1
+ def create(self, options):
+ self.size = MemorySize(options.tcp_size)
+ self.assoc = options.tcp_assoc
+ self.resourceStalls = options.no_tcc_resource_stalls
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class TCPCntrl(TCP_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
+ dataAccessLatency = options.TCP_latency)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+ self.L1cache.create(options)
+ self.issue_latency = 1
+
+ self.coalescer = VIPERCoalescer()
+ self.coalescer.version = self.seqCount()
+ self.coalescer.icache = self.L1cache
+ self.coalescer.dcache = self.L1cache
+ self.coalescer.ruby_system = ruby_system
+ self.coalescer.support_inst_reqs = False
+ self.coalescer.is_cpu_sequencer = False
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.is_cpu_sequencer = True
+
+ self.use_seq_not_coal = False
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def createCP(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
+ dataAccessLatency = options.TCP_latency)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+ self.L1cache.create(options)
+ self.issue_latency = 1
+
+ self.coalescer = VIPERCoalescer()
+ self.coalescer.version = self.seqCount()
+ self.coalescer.icache = self.L1cache
+ self.coalescer.dcache = self.L1cache
+ self.coalescer.ruby_system = ruby_system
+ self.coalescer.support_inst_reqs = False
+ self.coalescer.is_cpu_sequencer = False
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.is_cpu_sequencer = True
+
+ self.use_seq_not_coal = True
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class SQCCache(RubyCache):
+ dataArrayBanks = 8
+ tagArrayBanks = 8
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+
+ def create(self, options):
+ self.size = MemorySize(options.sqc_size)
+ self.assoc = options.sqc_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class SQCCntrl(SQC_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1cache = SQCCache()
+ self.L1cache.create(options)
+ self.L1cache.resourceStalls = options.no_resource_stalls
+
+ self.sequencer = RubySequencer()
+
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.support_data_reqs = False
+ self.sequencer.is_cpu_sequencer = False
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCC(RubyCache):
+ size = MemorySize("256kB")
+ assoc = 16
+ dataAccessLatency = 8
+ tagAccessLatency = 2
+ resourceStalls = True
+ def create(self, options):
+ self.assoc = options.tcc_assoc
+ if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
+ s = options.num_compute_units
+ tcc_size = s * 128
+ tcc_size = str(tcc_size)+'kB'
+ self.size = MemorySize(tcc_size)
+ self.dataArrayBanks = 64
+ self.tagArrayBanks = 64
+ else:
+ self.size = MemorySize(options.tcc_size)
+ self.dataArrayBanks = 256 / options.num_tccs #number of data banks
+ self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
+ self.size.value = self.size.value / options.num_tccs
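+        # Two sizing modes in the branches above: when a positive bw_scalor
+        # option is present (it is defined outside this file), the TCC is
+        # sized at 128kB per compute unit (e.g. 1024kB over 64 banks for an
+        # illustrative 8 CUs); otherwise --tcc-size is split evenly across
+        # the --num-tccs instances.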
+ if ((self.size.value / long(self.assoc)) < 128):
+ self.size.value = long(128 * self.assoc)
+ self.start_index_bit = math.log(options.cacheline_size, 2) + \
+ math.log(options.num_tccs, 2)
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+
+class TCCCntrl(TCC_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L2cache = TCC()
+ self.L2cache.create(options)
+ self.L2cache.resourceStalls = options.no_tcc_resource_stalls
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class L3Cache(RubyCache):
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+
+ def create(self, options, ruby_system, system):
+ self.size = MemorySize(options.l3_size)
+ self.size.value /= options.num_dirs
+ self.assoc = options.l3_assoc
+ self.dataArrayBanks /= options.num_dirs
+ self.tagArrayBanks /= options.num_dirs
+ self.dataAccessLatency = options.l3_data_latency
+ self.tagAccessLatency = options.l3_tag_latency
+ self.resourceStalls = False
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L3Cntrl(L3Cache_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L3cache = L3Cache()
+ self.L3cache.create(options, ruby_system, system)
+
+        self.l3_response_latency = \
+            max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+class DirMem(RubyDirectoryMemory, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ phys_mem_size = AddrRange(options.mem_size).size()
+ mem_module_size = phys_mem_size / options.num_dirs
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+ self.size = dir_size
+
+class DirCntrl(Directory_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.response_latency = 30
+
+ self.directory = DirMem()
+ self.directory.create(options, ruby_system, system)
+
+ self.L3CacheMemory = L3Cache()
+ self.L3CacheMemory.create(options, ruby_system, system)
+
+ self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
+ self.L3CacheMemory.tagAccessLatency)
+
+ self.number_of_TBEs = options.num_tbes
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+def define_options(parser):
+ parser.add_option("--num-subcaches", type = "int", default = 4)
+ parser.add_option("--l3-data-latency", type = "int", default = 20)
+ parser.add_option("--l3-tag-latency", type = "int", default = 15)
+ parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
+ parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
+ parser.add_option("--no-resource-stalls", action = "store_false",
+ default = True)
+ parser.add_option("--no-tcc-resource-stalls", action = "store_false",
+ default = True)
+ parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
+ parser.add_option("--num-tbes", type = "int", default = 256)
+ parser.add_option("--l2-latency", type = "int", default = 50) # load to use
+ parser.add_option("--num-tccs", type = "int", default = 1,
+ help = "number of TCC banks in the GPU")
+ parser.add_option("--sqc-size", type = 'string', default = '32kB',
+ help = "SQC cache size")
+ parser.add_option("--sqc-assoc", type = 'int', default = 8,
+ help = "SQC cache assoc")
+ parser.add_option("--WB_L1", action = "store_true", default = False,
+ help = "writeback L1")
+ parser.add_option("--WB_L2", action = "store_true", default = False,
+ help = "writeback L2")
+ parser.add_option("--TCP_latency", type = "int", default = 4,
+ help = "TCP latency")
+ parser.add_option("--TCC_latency", type = "int", default = 16,
+ help = "TCC latency")
+    parser.add_option("--tcc-size", type = 'string', default = '256kB',
+                      help = "aggregate TCC size")
+ parser.add_option("--tcc-assoc", type = 'int', default = 16,
+ help = "tcc assoc")
+ parser.add_option("--tcp-size", type = 'string', default = '16kB',
+ help = "tcp size")
+ parser.add_option("--tcp-assoc", type = 'int', default = 16,
+ help = "tcp assoc")
+    parser.add_option("--noL1", action = "store_true", default = False,
+                      help = "bypass L1")
+
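+# Hypothetical command line exercising these options (the build target,
+# script path and values are assumptions shown only for illustration):
+#   build/HSAIL_X86/gem5.opt configs/example/apu_se.py \
+#       --num-compute-units=8 --num-tccs=1 --tcc-size=256kB --tcp-size=16kB \
+#       --WB_L2 --use-L3-on-WT
+# define_options() registers the flags above; create_system() below consumes
+# them when Ruby builds the topology.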
+def create_system(options, full_system, system, dma_devices, ruby_system):
+ if buildEnv['PROTOCOL'] != 'GPU_VIPER':
+ panic("This script requires the GPU_VIPER protocol to be built.")
+
+ cpu_sequencers = []
+
+ #
+ # The ruby network creation expects the list of nodes in the system to be
+ # consistent with the NetDest list. Therefore the l1 controller nodes
+ # must be listed before the directory nodes and directory nodes before
+ # dma nodes, etc.
+ #
+ cp_cntrl_nodes = []
+ tcp_cntrl_nodes = []
+ sqc_cntrl_nodes = []
+ tcc_cntrl_nodes = []
+ dir_cntrl_nodes = []
+ l3_cntrl_nodes = []
+
+ #
+ # Must create the individual controllers before the network to ensure the
+ # controller constructors are called before the network constructor
+ #
+
+ # For an odd number of CPUs, still create the right number of controllers
+ TCC_bits = int(math.log(options.num_tccs, 2))
+
+ # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
+ # Clusters
+ crossbar_bw = None
+ mainCluster = None
+ if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
+ #Assuming a 2GHz clock
+ crossbar_bw = 16 * options.num_compute_units * options.bw_scalor
+ mainCluster = Cluster(intBW=crossbar_bw)
+ else:
+ mainCluster = Cluster(intBW=8) # 16 GB/s
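+    # The bandwidth figures appear to be bytes per cycle at the assumed 2GHz
+    # clock, so intBW=8 is the 16 GB/s noted above and the scaled path gives
+    # intBW = 16 * num_compute_units * bw_scalor (e.g. 512, roughly 1 TB/s,
+    # for an illustrative 8 CUs with bw_scalor=4).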
+ for i in xrange(options.num_dirs):
+
+ dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
+ dir_cntrl.create(options, ruby_system, system)
+ dir_cntrl.number_of_TBEs = options.num_tbes
+ dir_cntrl.useL3OnWT = options.use_L3_on_WT
+ # the number_of_TBEs is inclusive of TBEs below
+
+ # Connect the Directory controller to the ruby network
+ dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
+ dir_cntrl.requestFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.responseFromCores = MessageBuffer()
+ dir_cntrl.responseFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.unblockFromCores = MessageBuffer()
+ dir_cntrl.unblockFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.probeToCore = MessageBuffer()
+ dir_cntrl.probeToCore.master = ruby_system.network.slave
+
+ dir_cntrl.responseToCore = MessageBuffer()
+ dir_cntrl.responseToCore.master = ruby_system.network.slave
+
+ dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.responseFromMemory = MessageBuffer()
+
+ exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
+ dir_cntrl_nodes.append(dir_cntrl)
+
+ mainCluster.add(dir_cntrl)
+
+ cpuCluster = None
+ if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
+ cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
+ else:
+ cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
+ for i in xrange((options.num_cpus + 1) / 2):
+
+ cp_cntrl = CPCntrl()
+ cp_cntrl.create(options, ruby_system, system)
+
+ exec("ruby_system.cp_cntrl%d = cp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
+
+ # Connect the CP controllers and the network
+ cp_cntrl.requestFromCore = MessageBuffer()
+ cp_cntrl.requestFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.responseFromCore = MessageBuffer()
+ cp_cntrl.responseFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.unblockFromCore = MessageBuffer()
+ cp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.probeToCore = MessageBuffer()
+ cp_cntrl.probeToCore.slave = ruby_system.network.master
+
+ cp_cntrl.responseToCore = MessageBuffer()
+ cp_cntrl.responseToCore.slave = ruby_system.network.master
+
+ cp_cntrl.mandatoryQueue = MessageBuffer()
+ cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ cpuCluster.add(cp_cntrl)
+
+ gpuCluster = None
+ if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
+ gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
+ else:
+ gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
+ for i in xrange(options.num_compute_units):
+
+ tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
+ issue_latency = 1,
+ number_of_TBEs = 2560)
+ # TBEs set to max outstanding requests
+ tcp_cntrl.create(options, ruby_system, system)
+ tcp_cntrl.WB = options.WB_L1
+ tcp_cntrl.disableL1 = options.noL1
+ tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
+ tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
+
+ exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(tcp_cntrl.coalescer)
+ tcp_cntrl_nodes.append(tcp_cntrl)
+
+ # Connect the TCP controller to the ruby network
+ tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.unblockFromCore = MessageBuffer()
+ tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.probeToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.mandatoryQueue = MessageBuffer()
+
+ gpuCluster.add(tcp_cntrl)
+
+ for i in xrange(options.num_sqc):
+
+ sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
+ sqc_cntrl.create(options, ruby_system, system)
+
+ exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(sqc_cntrl.sequencer)
+
+ # Connect the SQC controller to the ruby network
+ sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.probeToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.mandatoryQueue = MessageBuffer()
+
+ # SQC also in GPU cluster
+ gpuCluster.add(sqc_cntrl)
+
+ for i in xrange(options.numCPs):
+
+ tcp_ID = options.num_compute_units + i
+ sqc_ID = options.num_sqc + i
+
+ tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
+ issue_latency = 1,
+ number_of_TBEs = 2560)
+ # TBEs set to max outstanding requests
+ tcp_cntrl.createCP(options, ruby_system, system)
+ tcp_cntrl.WB = options.WB_L1
+ tcp_cntrl.disableL1 = options.noL1
+ tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
+ tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
+
+ exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % tcp_ID)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(tcp_cntrl.sequencer)
+ tcp_cntrl_nodes.append(tcp_cntrl)
+
+ # Connect the CP (TCP) controllers to the ruby network
+ tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
+ tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.probeToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.mandatoryQueue = MessageBuffer()
+
+ gpuCluster.add(tcp_cntrl)
+
+ sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
+ sqc_cntrl.create(options, ruby_system, system)
+
+ exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(sqc_cntrl.sequencer)
+
+ # SQC also in GPU cluster
+ gpuCluster.add(sqc_cntrl)
+
+ for i in xrange(options.num_tccs):
+
+ tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
+ tcc_cntrl.create(options, ruby_system, system)
+ tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
+ tcc_cntrl.l2_response_latency = options.TCC_latency
+ tcc_cntrl_nodes.append(tcc_cntrl)
+ tcc_cntrl.WB = options.WB_L2
+ tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
+ # the number_of_TBEs is inclusive of TBEs below
+
+ # Connect the TCC controllers to the ruby network
+ tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
+
+ tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
+ tcc_cntrl.responseToCore.master = ruby_system.network.slave
+
+ tcc_cntrl.probeFromNB = MessageBuffer()
+ tcc_cntrl.probeFromNB.slave = ruby_system.network.master
+
+ tcc_cntrl.responseFromNB = MessageBuffer()
+ tcc_cntrl.responseFromNB.slave = ruby_system.network.master
+
+ tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
+ tcc_cntrl.requestToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.responseToNB = MessageBuffer()
+ tcc_cntrl.responseToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.unblockToNB = MessageBuffer()
+ tcc_cntrl.unblockToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i)
+
+        # TCC cntrls added to the GPU cluster
+ gpuCluster.add(tcc_cntrl)
+
+ # Assuming no DMA devices
+ assert(len(dma_devices) == 0)
+
+ # Add cpu/gpu clusters to main cluster
+ mainCluster.add(cpuCluster)
+ mainCluster.add(gpuCluster)
+
+ ruby_system.network.number_of_virtual_networks = 10
+
+ return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
diff --git a/configs/ruby/GPU_VIPER_Baseline.py b/configs/ruby/GPU_VIPER_Baseline.py
new file mode 100644
index 000000000..879b34e88
--- /dev/null
+++ b/configs/ruby/GPU_VIPER_Baseline.py
@@ -0,0 +1,588 @@
+#
+# Copyright (c) 2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Sooraj Puthoor
+#
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from Ruby import create_topology
+from Ruby import send_evicts
+
+from Cluster import Cluster
+from Crossbar import Crossbar
+
+class CntrlBase:
+ _seqs = 0
+ @classmethod
+ def seqCount(cls):
+        # Bump the CntrlBase counter, not the subclass's, so sequencer IDs
+        # are globally unique across all controller types.
+ CntrlBase._seqs += 1
+ return CntrlBase._seqs - 1
+
+ _cntrls = 0
+ @classmethod
+ def cntrlCount(cls):
+        # Bump the CntrlBase counter, not the subclass's, so the controller
+        # count is global across all controller types.
+ CntrlBase._cntrls += 1
+ return CntrlBase._cntrls - 1
+
+ _version = 0
+ @classmethod
+ def versionCount(cls):
+ cls._version += 1 # Use count for this particular type
+ return cls._version - 1
+
+class L1Cache(RubyCache):
+ resourceStalls = False
+ dataArrayBanks = 2
+ tagArrayBanks = 2
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L2Cache(RubyCache):
+ resourceStalls = False
+ assoc = 16
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class CPCntrl(CorePair_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1Icache = L1Cache()
+ self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
+ self.L1D0cache = L1Cache()
+ self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L1D1cache = L1Cache()
+ self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L2cache = L2Cache()
+ self.L2cache.create(options.l2_size, options.l2_assoc, options)
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1Icache
+ self.sequencer.dcache = self.L1D0cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.coreid = 0
+ self.sequencer.is_cpu_sequencer = True
+
+ self.sequencer1 = RubySequencer()
+ self.sequencer1.version = self.seqCount()
+ self.sequencer1.icache = self.L1Icache
+ self.sequencer1.dcache = self.L1D1cache
+ self.sequencer1.ruby_system = ruby_system
+ self.sequencer1.coreid = 1
+ self.sequencer1.is_cpu_sequencer = True
+
+ self.issue_latency = options.cpu_to_dir_latency
+ self.send_evictions = send_evicts(options)
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCPCache(RubyCache):
+ size = "16kB"
+ assoc = 16
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+ dataAccessLatency = 4
+ tagAccessLatency = 1
+ def create(self, options):
+ self.size = MemorySize(options.tcp_size)
+ self.dataArrayBanks = 16
+ self.tagArrayBanks = 16
+ self.dataAccessLatency = 4
+ self.tagAccessLatency = 1
+ self.resourceStalls = options.no_tcc_resource_stalls
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class TCPCntrl(TCP_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L1cache = TCPCache()
+ self.L1cache.create(options)
+ self.issue_latency = 1
+
+ self.coalescer = VIPERCoalescer()
+ self.coalescer.version = self.seqCount()
+ self.coalescer.icache = self.L1cache
+ self.coalescer.dcache = self.L1cache
+ self.coalescer.ruby_system = ruby_system
+ self.coalescer.support_inst_reqs = False
+ self.coalescer.is_cpu_sequencer = False
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.is_cpu_sequencer = True
+
+ self.use_seq_not_coal = False
+
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class SQCCache(RubyCache):
+ dataArrayBanks = 8
+ tagArrayBanks = 8
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+
+ def create(self, options):
+ self.size = MemorySize(options.sqc_size)
+ self.assoc = options.sqc_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class SQCCntrl(SQC_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L1cache = SQCCache()
+ self.L1cache.create(options)
+ self.L1cache.resourceStalls = False
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.support_data_reqs = False
+ self.sequencer.is_cpu_sequencer = False
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCC(RubyCache):
+ size = MemorySize("256kB")
+ assoc = 16
+ dataAccessLatency = 8
+ tagAccessLatency = 2
+ resourceStalls = True
+ def create(self, options):
+ self.assoc = options.tcc_assoc
+ if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
+ s = options.num_compute_units
+ tcc_size = s * 128
+ tcc_size = str(tcc_size)+'kB'
+ self.size = MemorySize(tcc_size)
+ self.dataArrayBanks = 64
+ self.tagArrayBanks = 64
+ else:
+ self.size = MemorySize(options.tcc_size)
+ self.dataArrayBanks = 256 / options.num_tccs #number of data banks
+ self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
+ self.size.value = self.size.value / options.num_tccs
+ if ((self.size.value / long(self.assoc)) < 128):
+ self.size.value = long(128 * self.assoc)
+ self.start_index_bit = math.log(options.cacheline_size, 2) + \
+ math.log(options.num_tccs, 2)
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class TCCCntrl(TCC_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L2cache = TCC()
+ self.L2cache.create(options)
+ self.ruby_system = ruby_system
+ self.L2cache.resourceStalls = options.no_tcc_resource_stalls
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class L3Cache(RubyCache):
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+
+ def create(self, options, ruby_system, system):
+ self.size = MemorySize(options.l3_size)
+ self.size.value /= options.num_dirs
+ self.assoc = options.l3_assoc
+ self.dataArrayBanks /= options.num_dirs
+ self.tagArrayBanks /= options.num_dirs
+ self.dataAccessLatency = options.l3_data_latency
+ self.tagAccessLatency = options.l3_tag_latency
+ self.resourceStalls = False
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class ProbeFilter(RubyCache):
+ size = "4MB"
+ assoc = 16
+ dataArrayBanks = 256
+ tagArrayBanks = 256
+
+ def create(self, options, ruby_system, system):
+ self.block_size = "%dB" % (64 * options.blocks_per_region)
+ self.size = options.region_dir_entries * \
+ self.block_size * options.num_compute_units
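+        # With the option defaults below (8192 region entries, 1 block per
+        # region, i.e. 64B blocks) and an illustrative 8 compute units this
+        # works out to 8192 * 64B * 8 = 4MB of probe filter capacity.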
+ self.assoc = 8
+ self.tagArrayBanks = 8
+ self.tagAccessLatency = options.dir_tag_latency
+ self.dataAccessLatency = 1
+ self.resourceStalls = options.no_resource_stalls
+ self.start_index_bit = 6 + int(math.log(options.blocks_per_region, 2))
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L3Cntrl(L3Cache_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L3cache = L3Cache()
+ self.L3cache.create(options, ruby_system, system)
+ self.l3_response_latency = \
+ max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+class DirMem(RubyDirectoryMemory, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ phys_mem_size = AddrRange(options.mem_size).size()
+ mem_module_size = phys_mem_size / options.num_dirs
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+ self.size = dir_size
+
+class DirCntrl(Directory_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.response_latency = 30
+ self.directory = DirMem()
+ self.directory.create(options, ruby_system, system)
+ self.L3CacheMemory = L3Cache()
+ self.L3CacheMemory.create(options, ruby_system, system)
+ self.ProbeFilterMemory = ProbeFilter()
+ self.ProbeFilterMemory.create(options, ruby_system, system)
+ self.l3_hit_latency = \
+ max(self.L3CacheMemory.dataAccessLatency,
+ self.L3CacheMemory.tagAccessLatency)
+
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+def define_options(parser):
+ parser.add_option("--num-subcaches", type = "int", default = 4)
+ parser.add_option("--l3-data-latency", type = "int", default = 20)
+ parser.add_option("--l3-tag-latency", type = "int", default = 15)
+ parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
+ parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
+ parser.add_option("--no-resource-stalls", action = "store_false",
+ default = True)
+ parser.add_option("--no-tcc-resource-stalls", action = "store_false",
+ default = True)
+ parser.add_option("--num-tbes", type = "int", default = 2560)
+ parser.add_option("--l2-latency", type = "int", default = 50) # load to use
+ parser.add_option("--num-tccs", type = "int", default = 1,
+ help = "number of TCC banks in the GPU")
+ parser.add_option("--sqc-size", type = 'string', default = '32kB',
+ help = "SQC cache size")
+ parser.add_option("--sqc-assoc", type = 'int', default = 8,
+ help = "SQC cache assoc")
+ parser.add_option("--region-dir-entries", type = "int", default = 8192)
+ parser.add_option("--dir-tag-latency", type = "int", default = 8)
+ parser.add_option("--dir-tag-banks", type = "int", default = 4)
+ parser.add_option("--blocks-per-region", type = "int", default = 1)
+ parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
+ parser.add_option("--nonInclusiveDir", action = "store_true",
+ default = False)
+    parser.add_option("--WB_L1", action = "store_true",
+                      default = False, help = "writeback L1")
+ parser.add_option("--WB_L2", action = "store_true",
+ default = False, help = "writeback L2")
+ parser.add_option("--TCP_latency", type = "int",
+ default = 4, help = "TCP latency")
+ parser.add_option("--TCC_latency", type = "int",
+ default = 16, help = "TCC latency")
+    parser.add_option("--tcc-size", type = 'string', default = '2MB',
+                      help = "aggregate TCC size")
+ parser.add_option("--tcc-assoc", type = 'int', default = 16,
+ help = "tcc assoc")
+ parser.add_option("--tcp-size", type = 'string', default = '16kB',
+ help = "tcp size")
+ parser.add_option("--sampler-sets", type = "int", default = 1024)
+ parser.add_option("--sampler-assoc", type = "int", default = 16)
+ parser.add_option("--sampler-counter", type = "int", default = 512)
+    parser.add_option("--noL1", action = "store_true", default = False,
+                      help = "bypass L1")
+    parser.add_option("--noL2", action = "store_true", default = False,
+                      help = "bypass L2")
+
+def create_system(options, full_system, system, dma_devices, ruby_system):
+ if buildEnv['PROTOCOL'] != 'GPU_VIPER_Baseline':
+        panic("This script requires the " \
+              "GPU_VIPER_Baseline protocol to be built.")
+
+ cpu_sequencers = []
+
+ #
+ # The ruby network creation expects the list of nodes in the system to be
+ # consistent with the NetDest list. Therefore the l1 controller nodes
+ # must be listed before the directory nodes and directory nodes before
+ # dma nodes, etc.
+ #
+ cp_cntrl_nodes = []
+ tcp_cntrl_nodes = []
+ sqc_cntrl_nodes = []
+ tcc_cntrl_nodes = []
+ dir_cntrl_nodes = []
+ l3_cntrl_nodes = []
+
+ #
+ # Must create the individual controllers before the network to ensure the
+ # controller constructors are called before the network constructor
+ #
+
+ # For an odd number of CPUs, still create the right number of controllers
+ TCC_bits = int(math.log(options.num_tccs, 2))
+
+ # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
+ # Clusters
+ crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
+ mainCluster = Cluster(intBW = crossbar_bw)
+ for i in xrange(options.num_dirs):
+
+        dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
+ dir_cntrl.create(options, ruby_system, system)
+ dir_cntrl.number_of_TBEs = options.num_tbes
+ dir_cntrl.useL3OnWT = options.use_L3_on_WT
+ dir_cntrl.inclusiveDir = not options.nonInclusiveDir
+
+ # Connect the Directory controller to the ruby network
+ dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
+ dir_cntrl.requestFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.responseFromCores = MessageBuffer()
+ dir_cntrl.responseFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.unblockFromCores = MessageBuffer()
+ dir_cntrl.unblockFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.probeToCore = MessageBuffer()
+ dir_cntrl.probeToCore.master = ruby_system.network.slave
+
+ dir_cntrl.responseToCore = MessageBuffer()
+ dir_cntrl.responseToCore.master = ruby_system.network.slave
+
+ dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.responseFromMemory = MessageBuffer()
+
+ exec("system.dir_cntrl%d = dir_cntrl" % i)
+ dir_cntrl_nodes.append(dir_cntrl)
+ mainCluster.add(dir_cntrl)
+
+    cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
+ for i in xrange((options.num_cpus + 1) / 2):
+
+ cp_cntrl = CPCntrl()
+ cp_cntrl.create(options, ruby_system, system)
+
+ exec("system.cp_cntrl%d = cp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
+
+ # Connect the CP controllers and the network
+ cp_cntrl.requestFromCore = MessageBuffer()
+ cp_cntrl.requestFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.responseFromCore = MessageBuffer()
+ cp_cntrl.responseFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.unblockFromCore = MessageBuffer()
+ cp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.probeToCore = MessageBuffer()
+ cp_cntrl.probeToCore.slave = ruby_system.network.master
+
+ cp_cntrl.responseToCore = MessageBuffer()
+ cp_cntrl.responseToCore.slave = ruby_system.network.master
+
+ cp_cntrl.mandatoryQueue = MessageBuffer()
+ cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ cpuCluster.add(cp_cntrl)
+
+ gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
+ for i in xrange(options.num_compute_units):
+
+ tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
+ issue_latency = 1,
+ number_of_TBEs = 2560)
+ # TBEs set to max outstanding requests
+ tcp_cntrl.create(options, ruby_system, system)
+ tcp_cntrl.WB = options.WB_L1
+ tcp_cntrl.disableL1 = options.noL1
+
+ exec("system.tcp_cntrl%d = tcp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(tcp_cntrl.coalescer)
+ tcp_cntrl_nodes.append(tcp_cntrl)
+
+ # Connect the CP (TCP) controllers to the ruby network
+ tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.unblockFromCore = MessageBuffer()
+ tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.probeToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.mandatoryQueue = MessageBuffer()
+
+ gpuCluster.add(tcp_cntrl)
+
+ for i in xrange(options.num_sqc):
+
+ sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
+ sqc_cntrl.create(options, ruby_system, system)
+
+ exec("system.sqc_cntrl%d = sqc_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(sqc_cntrl.sequencer)
+
+ # Connect the SQC controller to the ruby network
+ sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.probeToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.mandatoryQueue = MessageBuffer()
+
+ # SQC also in GPU cluster
+ gpuCluster.add(sqc_cntrl)
+
+ # Because of wire buffers, num_tccs must equal num_tccdirs
+ numa_bit = 6
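+    # numa_bit = 6 presumably corresponds to a 64B (2^6) cache-line
+    # interleaving granularity.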
+
+ for i in xrange(options.num_tccs):
+
+ tcc_cntrl = TCCCntrl()
+ tcc_cntrl.create(options, ruby_system, system)
+ tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
+ tcc_cntrl.l2_response_latency = options.TCC_latency
+ tcc_cntrl_nodes.append(tcc_cntrl)
+ tcc_cntrl.WB = options.WB_L2
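+        # Scale the TCC TBE count with the number of CUs so the TCC can hold
+        # the maximum outstanding requests from every TCP (2560 each, as set
+        # for the TCPs above).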
+ tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
+
+ # Connect the TCC controllers to the ruby network
+ tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
+
+ tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
+ tcc_cntrl.responseToCore.master = ruby_system.network.slave
+
+ tcc_cntrl.probeFromNB = MessageBuffer()
+ tcc_cntrl.probeFromNB.slave = ruby_system.network.master
+
+ tcc_cntrl.responseFromNB = MessageBuffer()
+ tcc_cntrl.responseFromNB.slave = ruby_system.network.master
+
+ tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
+ tcc_cntrl.requestToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.responseToNB = MessageBuffer()
+ tcc_cntrl.responseToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.unblockToNB = MessageBuffer()
+ tcc_cntrl.unblockToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ exec("system.tcc_cntrl%d = tcc_cntrl" % i)
+        # TCC cntrls added to the GPU cluster
+ gpuCluster.add(tcc_cntrl)
+
+ # Assuming no DMA devices
+ assert(len(dma_devices) == 0)
+
+ # Add cpu/gpu clusters to main cluster
+ mainCluster.add(cpuCluster)
+ mainCluster.add(gpuCluster)
+
+ ruby_system.network.number_of_virtual_networks = 10
+
+ return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
diff --git a/configs/ruby/GPU_VIPER_Region.py b/configs/ruby/GPU_VIPER_Region.py
new file mode 100644
index 000000000..94cb9b70b
--- /dev/null
+++ b/configs/ruby/GPU_VIPER_Region.py
@@ -0,0 +1,758 @@
+#
+# Copyright (c) 2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Sooraj Puthoor
+#
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from Ruby import send_evicts
+
+from Cluster import Cluster
+
+class CntrlBase:
+ _seqs = 0
+ @classmethod
+ def seqCount(cls):
+ # Use SeqCount not class since we need global count
+ CntrlBase._seqs += 1
+ return CntrlBase._seqs - 1
+
+ _cntrls = 0
+ @classmethod
+ def cntrlCount(cls):
+        # Use CntrlCount not class since we need global count
+ CntrlBase._cntrls += 1
+ return CntrlBase._cntrls - 1
+
+ _version = 0
+ @classmethod
+ def versionCount(cls):
+ cls._version += 1 # Use count for this particular type
+ return cls._version - 1
+
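+
+# seqCount and cntrlCount increment counters on CntrlBase itself, giving one
+# global numbering across all controller types; versionCount counts per
+# subclass so each controller type numbers its own versions.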
+#
+# Note: the L1 Cache latency is only used by the sequencer on fast path hits
+#
+class L1Cache(RubyCache):
+ resourceStalls = False
+ dataArrayBanks = 2
+ tagArrayBanks = 2
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L2Cache(RubyCache):
+ resourceStalls = False
+ assoc = 16
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+ def create(self, size, assoc, options):
+ self.size = MemorySize(size)
+ self.assoc = assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class CPCntrl(CorePair_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1Icache = L1Cache()
+ self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
+ self.L1D0cache = L1Cache()
+ self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L1D1cache = L1Cache()
+ self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
+ self.L2cache = L2Cache()
+ self.L2cache.create(options.l2_size, options.l2_assoc, options)
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1Icache
+ self.sequencer.dcache = self.L1D0cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.coreid = 0
+ self.sequencer.is_cpu_sequencer = True
+
+ self.sequencer1 = RubySequencer()
+ self.sequencer1.version = self.seqCount()
+ self.sequencer1.icache = self.L1Icache
+ self.sequencer1.dcache = self.L1D1cache
+ self.sequencer1.ruby_system = ruby_system
+ self.sequencer1.coreid = 1
+ self.sequencer1.is_cpu_sequencer = True
+
+ self.issue_latency = 1
+ self.send_evictions = send_evicts(options)
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCPCache(RubyCache):
+ size = "16kB"
+ assoc = 16
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+ dataAccessLatency = 4
+ tagAccessLatency = 1
+ def create(self, options):
+ self.size = MemorySize(options.tcp_size)
+ self.dataArrayBanks = 16
+ self.tagArrayBanks = 16
+ self.dataAccessLatency = 4
+ self.tagAccessLatency = 1
+ self.resourceStalls = options.no_tcc_resource_stalls
+ self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
+
+class TCPCntrl(TCP_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L1cache = TCPCache(dataAccessLatency = options.TCP_latency)
+ self.L1cache.create(options)
+ self.issue_latency = 1
+
+ self.coalescer = VIPERCoalescer()
+ self.coalescer.version = self.seqCount()
+ self.coalescer.icache = self.L1cache
+ self.coalescer.dcache = self.L1cache
+ self.coalescer.ruby_system = ruby_system
+ self.coalescer.support_inst_reqs = False
+ self.coalescer.is_cpu_sequencer = False
+
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.is_cpu_sequencer = True
+
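+        # use_seq_not_coal selects between the RubySequencer and the VIPER
+        # coalescer above; False routes GPU requests through the coalescer.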
+ self.use_seq_not_coal = False
+
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class SQCCache(RubyCache):
+ dataArrayBanks = 8
+ tagArrayBanks = 8
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+
+ def create(self, options):
+ self.size = MemorySize(options.sqc_size)
+ self.assoc = options.sqc_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
+
+class SQCCntrl(SQC_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L1cache = SQCCache()
+ self.L1cache.create(options)
+ self.L1cache.resourceStalls = False
+ self.sequencer = RubySequencer()
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1cache
+ self.sequencer.dcache = self.L1cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.support_data_reqs = False
+ self.sequencer.is_cpu_sequencer = False
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class TCC(RubyCache):
+ size = MemorySize("256kB")
+ assoc = 16
+ dataAccessLatency = 8
+ tagAccessLatency = 2
+ resourceStalls = False
+ def create(self, options):
+ self.assoc = options.tcc_assoc
+ if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
+ s = options.num_compute_units
+ tcc_size = s * 128
+ tcc_size = str(tcc_size)+'kB'
+ self.size = MemorySize(tcc_size)
+ self.dataArrayBanks = 64
+ self.tagArrayBanks = 64
+ else:
+ self.size = MemorySize(options.tcc_size)
+ self.dataArrayBanks = 256 / options.num_tccs #number of data banks
+ self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
+ self.size.value = self.size.value / options.num_tccs
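+        # If the per-bank share would leave less than 128B per way, round the
+        # bank size up to 128B * assoc.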
+ if ((self.size.value / long(self.assoc)) < 128):
+ self.size.value = long(128 * self.assoc)
+ self.start_index_bit = math.log(options.cacheline_size, 2) + \
+ math.log(options.num_tccs, 2)
+ self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
+
+class TCCCntrl(TCC_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L2cache = TCC()
+ self.L2cache.create(options)
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class L3Cache(RubyCache):
+ dataArrayBanks = 16
+ tagArrayBanks = 16
+
+ def create(self, options, ruby_system, system):
+ self.size = MemorySize(options.l3_size)
+ self.size.value /= options.num_dirs
+ self.assoc = options.l3_assoc
+        self.dataArrayBanks /= options.num_dirs
+        self.tagArrayBanks /= options.num_dirs
+ self.dataAccessLatency = options.l3_data_latency
+ self.tagAccessLatency = options.l3_tag_latency
+ self.resourceStalls = False
+ self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
+
+class L3Cntrl(L3Cache_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L3cache = L3Cache()
+ self.L3cache.create(options, ruby_system, system)
+ self.l3_response_latency = \
+ max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+# Directory memory: Directory memory of infinite size which is
+# used by directory controller to store the "states" of the
+# state machine. The state machine is implemented per cache block
+class DirMem(RubyDirectoryMemory, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ phys_mem_size = AddrRange(options.mem_size).size()
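+        # Each directory covers an equal share of physical memory (this
+        # assumes mem_size divides evenly by num_dirs).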
+ mem_module_size = phys_mem_size / options.num_dirs
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+ self.size = dir_size
+
+# Directory controller: Contains directory memory, L3 cache and associated state
+# machine which is used to accurately redirect a data request to L3 cache or to
+# memory. The permissions requests do not come to this directory for region
+# based protocols as they are handled exclusively by the region directory.
+# However, region directory controller uses this directory controller for
+# sending probe requests and receiving probe responses.
+class DirCntrl(Directory_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.response_latency = 25
+ self.response_latency_regionDir = 1
+ self.directory = DirMem()
+ self.directory.create(options, ruby_system, system)
+ self.L3CacheMemory = L3Cache()
+ self.L3CacheMemory.create(options, ruby_system, system)
+ self.l3_hit_latency = \
+ max(self.L3CacheMemory.dataAccessLatency,
+ self.L3CacheMemory.tagAccessLatency)
+
+ self.ruby_system = ruby_system
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+# Region directory: Stores region permissions
+class RegionDir(RubyCache):
+
+ def create(self, options, ruby_system, system):
+ self.block_size = "%dB" % (64 * options.blocks_per_region)
+ self.size = options.region_dir_entries * \
+ self.block_size * options.num_compute_units
+ self.assoc = 8
+ self.tagArrayBanks = 8
+ self.tagAccessLatency = options.dir_tag_latency
+ self.dataAccessLatency = 1
+ self.resourceStalls = options.no_resource_stalls
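+        # Index past the 64B line offset (6 bits) plus the region offset
+        # bits, so the directory is indexed by region-aligned addresses.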
+ self.start_index_bit = 6 + int(math.log(options.blocks_per_region, 2))
+        self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
+
+# Region directory controller: Contains region directory and associated state
+# machine for dealing with region coherence requests.
+class RegionCntrl(RegionDir_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.cacheMemory = RegionDir()
+ self.cacheMemory.create(options, ruby_system, system)
+ self.blocksPerRegion = options.blocks_per_region
+ self.toDirLatency = \
+ max(self.cacheMemory.dataAccessLatency,
+ self.cacheMemory.tagAccessLatency)
+ self.ruby_system = ruby_system
+ self.always_migrate = options.always_migrate
+ self.sym_migrate = options.symmetric_migrate
+ self.asym_migrate = options.asymmetric_migrate
+ if self.always_migrate:
+ assert(not self.asym_migrate and not self.sym_migrate)
+ if self.sym_migrate:
+ assert(not self.always_migrate and not self.asym_migrate)
+ if self.asym_migrate:
+ assert(not self.always_migrate and not self.sym_migrate)
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+# Region Buffer: A region directory cache which avoids some potential
+# long latency lookup of region directory for getting region permissions
+class RegionBuffer(RubyCache):
+ assoc = 4
+ dataArrayBanks = 256
+ tagArrayBanks = 256
+ dataAccessLatency = 1
+ tagAccessLatency = 1
+ resourceStalls = True
+
+class RBCntrl(RegionBuffer_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.cacheMemory = RegionBuffer()
+ self.cacheMemory.resourceStalls = options.no_tcc_resource_stalls
+ self.cacheMemory.dataArrayBanks = 64
+ self.cacheMemory.tagArrayBanks = 64
+ self.blocksPerRegion = options.blocks_per_region
+ self.cacheMemory.block_size = "%dB" % (64 * self.blocksPerRegion)
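+        # As in the region directory, index by region-aligned address.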
+ self.cacheMemory.start_index_bit = \
+ 6 + int(math.log(self.blocksPerRegion, 2))
+ self.cacheMemory.size = options.region_buffer_entries * \
+ self.cacheMemory.block_size * options.num_compute_units
+ self.toDirLatency = options.gpu_to_dir_latency
+ self.toRegionDirLatency = options.cpu_to_dir_latency
+ self.noTCCdir = True
+ TCC_bits = int(math.log(options.num_tccs, 2))
+ self.TCC_select_num_bits = TCC_bits
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+ self.cacheMemory.replacement_policy = \
+ PseudoLRUReplacementPolicy(assoc = self.cacheMemory.assoc)
+
+def define_options(parser):
+ parser.add_option("--num-subcaches", type="int", default=4)
+ parser.add_option("--l3-data-latency", type="int", default=20)
+ parser.add_option("--l3-tag-latency", type="int", default=15)
+ parser.add_option("--cpu-to-dir-latency", type="int", default=120)
+ parser.add_option("--gpu-to-dir-latency", type="int", default=60)
+ parser.add_option("--no-resource-stalls", action="store_false",
+ default=True)
+ parser.add_option("--no-tcc-resource-stalls", action="store_false",
+ default=True)
+ parser.add_option("--num-tbes", type="int", default=32)
+ parser.add_option("--l2-latency", type="int", default=50) # load to use
+ parser.add_option("--num-tccs", type="int", default=1,
+ help="number of TCC banks in the GPU")
+
+ parser.add_option("--sqc-size", type='string', default='32kB',
+ help="SQC cache size")
+ parser.add_option("--sqc-assoc", type='int', default=8,
+ help="SQC cache assoc")
+
+ parser.add_option("--WB_L1", action="store_true",
+        default=False, help="L1 writeback cache")
+ parser.add_option("--WB_L2", action="store_true",
+ default=False, help="L2 Writeback Cache")
+ parser.add_option("--TCP_latency",
+ type="int", default=4, help="TCP latency")
+ parser.add_option("--TCC_latency",
+ type="int", default=16, help="TCC latency")
+ parser.add_option("--tcc-size", type='string', default='2MB',
+                      help="aggregate TCC size")
+ parser.add_option("--tcc-assoc", type='int', default=16,
+ help="tcc assoc")
+ parser.add_option("--tcp-size", type='string', default='16kB',
+ help="tcp size")
+
+ parser.add_option("--dir-tag-latency", type="int", default=4)
+ parser.add_option("--dir-tag-banks", type="int", default=4)
+ parser.add_option("--blocks-per-region", type="int", default=16)
+ parser.add_option("--dir-entries", type="int", default=8192)
+
+    # Region buffer is a cache of the region directory. Hence the region
+    # directory is inclusive with respect to the region buffer. However,
+    # the region directory is non-inclusive with respect to the caches
+    # in the system.
+ parser.add_option("--region-dir-entries", type="int", default=1024)
+ parser.add_option("--region-buffer-entries", type="int", default=512)
+
+ parser.add_option("--always-migrate",
+ action="store_true", default=False)
+ parser.add_option("--symmetric-migrate",
+ action="store_true", default=False)
+ parser.add_option("--asymmetric-migrate",
+ action="store_true", default=False)
+ parser.add_option("--use-L3-on-WT", action="store_true", default=False)
+
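+# The options above are consumed by create_system() below. An illustrative
+# (hypothetical) invocation through the accompanying apu_se.py script might
+# look like:
+#   gem5.opt configs/example/apu_se.py --num-compute-units 8 \
+#       --blocks-per-region 16 --always-migrate
+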
+def create_system(options, full_system, system, dma_devices, ruby_system):
+ if buildEnv['PROTOCOL'] != 'GPU_VIPER_Region':
+ panic("This script requires the GPU_VIPER_Region protocol to be built.")
+
+ cpu_sequencers = []
+
+ #
+ # The ruby network creation expects the list of nodes in the system to be
+ # consistent with the NetDest list. Therefore the l1 controller nodes
+ # must be listed before the directory nodes and directory nodes before
+ # dma nodes, etc.
+ #
+ dir_cntrl_nodes = []
+
+    # Compute the number of bits needed to select a TCC bank
+ TCC_bits = int(math.log(options.num_tccs, 2))
+
+ #
+ # Must create the individual controllers before the network to ensure the
+ # controller constructors are called before the network constructor
+ #
+
+ # For an odd number of CPUs, still create the right number of controllers
+ crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
+ cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
+ for i in xrange((options.num_cpus + 1) / 2):
+
+ cp_cntrl = CPCntrl()
+ cp_cntrl.create(options, ruby_system, system)
+
+ rb_cntrl = RBCntrl()
+ rb_cntrl.create(options, ruby_system, system)
+ rb_cntrl.number_of_TBEs = 256
+ rb_cntrl.isOnCPU = True
+
+ cp_cntrl.regionBufferNum = rb_cntrl.version
+
+ exec("system.cp_cntrl%d = cp_cntrl" % i)
+ exec("system.rb_cntrl%d = rb_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
+
+ # Connect the CP controllers and the network
+ cp_cntrl.requestFromCore = MessageBuffer()
+ cp_cntrl.requestFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.responseFromCore = MessageBuffer()
+ cp_cntrl.responseFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.unblockFromCore = MessageBuffer()
+ cp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.probeToCore = MessageBuffer()
+ cp_cntrl.probeToCore.slave = ruby_system.network.master
+
+ cp_cntrl.responseToCore = MessageBuffer()
+ cp_cntrl.responseToCore.slave = ruby_system.network.master
+
+ cp_cntrl.mandatoryQueue = MessageBuffer()
+ cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ # Connect the RB controllers to the ruby network
+ rb_cntrl.requestFromCore = MessageBuffer(ordered = True)
+ rb_cntrl.requestFromCore.slave = ruby_system.network.master
+
+ rb_cntrl.responseFromCore = MessageBuffer()
+ rb_cntrl.responseFromCore.slave = ruby_system.network.master
+
+ rb_cntrl.requestToNetwork = MessageBuffer()
+ rb_cntrl.requestToNetwork.master = ruby_system.network.slave
+
+ rb_cntrl.notifyFromRegionDir = MessageBuffer()
+ rb_cntrl.notifyFromRegionDir.slave = ruby_system.network.master
+
+ rb_cntrl.probeFromRegionDir = MessageBuffer()
+ rb_cntrl.probeFromRegionDir.slave = ruby_system.network.master
+
+ rb_cntrl.unblockFromDir = MessageBuffer()
+ rb_cntrl.unblockFromDir.slave = ruby_system.network.master
+
+ rb_cntrl.responseToRegDir = MessageBuffer()
+ rb_cntrl.responseToRegDir.master = ruby_system.network.slave
+
+ rb_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ cpuCluster.add(cp_cntrl)
+ cpuCluster.add(rb_cntrl)
+
+ gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
+ for i in xrange(options.num_compute_units):
+
+ tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
+ issue_latency = 1,
+ number_of_TBEs = 2560)
+ # TBEs set to max outstanding requests
+ tcp_cntrl.create(options, ruby_system, system)
+ tcp_cntrl.WB = options.WB_L1
+ tcp_cntrl.disableL1 = False
+
+ exec("system.tcp_cntrl%d = tcp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(tcp_cntrl.coalescer)
+
+ # Connect the CP (TCP) controllers to the ruby network
+ tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
+
+ tcp_cntrl.unblockFromCore = MessageBuffer()
+ tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.probeToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
+ tcp_cntrl.responseToTCP.slave = ruby_system.network.master
+
+ tcp_cntrl.mandatoryQueue = MessageBuffer()
+
+ gpuCluster.add(tcp_cntrl)
+
+ for i in xrange(options.num_sqc):
+
+ sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
+ sqc_cntrl.create(options, ruby_system, system)
+
+ exec("system.sqc_cntrl%d = sqc_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(sqc_cntrl.sequencer)
+
+ # Connect the SQC controller to the ruby network
+ sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
+
+ sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.probeToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
+ sqc_cntrl.responseToSQC.slave = ruby_system.network.master
+
+ sqc_cntrl.mandatoryQueue = MessageBuffer()
+
+ # SQC also in GPU cluster
+ gpuCluster.add(sqc_cntrl)
+
+ numa_bit = 6
+
+ for i in xrange(options.num_tccs):
+
+ tcc_cntrl = TCCCntrl()
+ tcc_cntrl.create(options, ruby_system, system)
+ tcc_cntrl.l2_request_latency = 1
+ tcc_cntrl.l2_response_latency = options.TCC_latency
+ tcc_cntrl.WB = options.WB_L2
+ tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
+
+ # Connect the TCC controllers to the ruby network
+ tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
+ tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
+
+ tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
+ tcc_cntrl.responseToCore.master = ruby_system.network.slave
+
+ tcc_cntrl.probeFromNB = MessageBuffer()
+ tcc_cntrl.probeFromNB.slave = ruby_system.network.master
+
+ tcc_cntrl.responseFromNB = MessageBuffer()
+ tcc_cntrl.responseFromNB.slave = ruby_system.network.master
+
+ tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
+ tcc_cntrl.requestToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.responseToNB = MessageBuffer()
+ tcc_cntrl.responseToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.unblockToNB = MessageBuffer()
+ tcc_cntrl.unblockToNB.master = ruby_system.network.slave
+
+ tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ rb_cntrl = RBCntrl()
+ rb_cntrl.create(options, ruby_system, system)
+ rb_cntrl.number_of_TBEs = 2560 * options.num_compute_units
+ rb_cntrl.isOnCPU = False
+
+ # Connect the RB controllers to the ruby network
+ rb_cntrl.requestFromCore = MessageBuffer(ordered = True)
+ rb_cntrl.requestFromCore.slave = ruby_system.network.master
+
+ rb_cntrl.responseFromCore = MessageBuffer()
+ rb_cntrl.responseFromCore.slave = ruby_system.network.master
+
+ rb_cntrl.requestToNetwork = MessageBuffer()
+ rb_cntrl.requestToNetwork.master = ruby_system.network.slave
+
+ rb_cntrl.notifyFromRegionDir = MessageBuffer()
+ rb_cntrl.notifyFromRegionDir.slave = ruby_system.network.master
+
+ rb_cntrl.probeFromRegionDir = MessageBuffer()
+ rb_cntrl.probeFromRegionDir.slave = ruby_system.network.master
+
+ rb_cntrl.unblockFromDir = MessageBuffer()
+ rb_cntrl.unblockFromDir.slave = ruby_system.network.master
+
+ rb_cntrl.responseToRegDir = MessageBuffer()
+ rb_cntrl.responseToRegDir.master = ruby_system.network.slave
+
+ rb_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ tcc_cntrl.regionBufferNum = rb_cntrl.version
+
+ exec("system.tcc_cntrl%d = tcc_cntrl" % i)
+ exec("system.tcc_rb_cntrl%d = rb_cntrl" % i)
+
+ # TCC cntrls added to the GPU cluster
+ gpuCluster.add(tcc_cntrl)
+ gpuCluster.add(rb_cntrl)
+
+ # Because of wire buffers, num_l3caches must equal num_dirs
+ # Region coherence only works with 1 dir
+ assert(options.num_l3caches == options.num_dirs == 1)
+
+ # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
+ # Clusters
+ mainCluster = Cluster(intBW = crossbar_bw)
+
+ dir_cntrl = DirCntrl()
+ dir_cntrl.create(options, ruby_system, system)
+ dir_cntrl.number_of_TBEs = 2560 * options.num_compute_units
+ dir_cntrl.useL3OnWT = options.use_L3_on_WT
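+    # use_L3_on_WT presumably controls whether write-through data is also
+    # allocated in the L3 on its way to memory.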
+
+ # Connect the Directory controller to the ruby network
+ dir_cntrl.requestFromCores = MessageBuffer()
+ dir_cntrl.requestFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.responseFromCores = MessageBuffer()
+ dir_cntrl.responseFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.unblockFromCores = MessageBuffer()
+ dir_cntrl.unblockFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.probeToCore = MessageBuffer()
+ dir_cntrl.probeToCore.master = ruby_system.network.slave
+
+ dir_cntrl.responseToCore = MessageBuffer()
+ dir_cntrl.responseToCore.master = ruby_system.network.slave
+
+ dir_cntrl.reqFromRegBuf = MessageBuffer()
+ dir_cntrl.reqFromRegBuf.slave = ruby_system.network.master
+
+ dir_cntrl.reqToRegDir = MessageBuffer(ordered = True)
+ dir_cntrl.reqToRegDir.master = ruby_system.network.slave
+
+ dir_cntrl.reqFromRegDir = MessageBuffer(ordered = True)
+ dir_cntrl.reqFromRegDir.slave = ruby_system.network.master
+
+ dir_cntrl.unblockToRegDir = MessageBuffer()
+ dir_cntrl.unblockToRegDir.master = ruby_system.network.slave
+
+ dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.responseFromMemory = MessageBuffer()
+
+ exec("system.dir_cntrl%d = dir_cntrl" % i)
+ dir_cntrl_nodes.append(dir_cntrl)
+
+ mainCluster.add(dir_cntrl)
+
+ reg_cntrl = RegionCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
+ reg_cntrl.create(options, ruby_system, system)
+ reg_cntrl.number_of_TBEs = options.num_tbes
+ reg_cntrl.cpuRegionBufferNum = system.rb_cntrl0.version
+ reg_cntrl.gpuRegionBufferNum = system.tcc_rb_cntrl0.version
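+    # The region directory records the versions of the CPU-side and GPU-side
+    # region buffers so its notifications and probes (buffers below) can be
+    # directed to them.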
+
+ # Connect the Region Dir controllers to the ruby network
+ reg_cntrl.requestToDir = MessageBuffer(ordered = True)
+ reg_cntrl.requestToDir.master = ruby_system.network.slave
+
+ reg_cntrl.notifyToRBuffer = MessageBuffer()
+ reg_cntrl.notifyToRBuffer.master = ruby_system.network.slave
+
+ reg_cntrl.probeToRBuffer = MessageBuffer()
+ reg_cntrl.probeToRBuffer.master = ruby_system.network.slave
+
+ reg_cntrl.responseFromRBuffer = MessageBuffer()
+ reg_cntrl.responseFromRBuffer.slave = ruby_system.network.master
+
+ reg_cntrl.requestFromRegBuf = MessageBuffer()
+ reg_cntrl.requestFromRegBuf.slave = ruby_system.network.master
+
+ reg_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ exec("system.reg_cntrl%d = reg_cntrl" % i)
+
+ mainCluster.add(reg_cntrl)
+
+ # Assuming no DMA devices
+ assert(len(dma_devices) == 0)
+
+ # Add cpu/gpu clusters to main cluster
+ mainCluster.add(cpuCluster)
+ mainCluster.add(gpuCluster)
+
+ ruby_system.network.number_of_virtual_networks = 10
+
+ return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
diff --git a/configs/ruby/MOESI_AMD_Base.py b/configs/ruby/MOESI_AMD_Base.py
new file mode 100644
index 000000000..4c8ad28b0
--- /dev/null
+++ b/configs/ruby/MOESI_AMD_Base.py
@@ -0,0 +1,326 @@
+#
+# Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# For use for simulation and test purposes only
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Lisa Hsu
+#
+
+import math
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from Ruby import create_topology
+from Ruby import send_evicts
+
+from Cluster import Cluster
+from Crossbar import Crossbar
+
+class CntrlBase:
+ _seqs = 0
+ @classmethod
+ def seqCount(cls):
+ # Use SeqCount not class since we need global count
+ CntrlBase._seqs += 1
+ return CntrlBase._seqs - 1
+
+ _cntrls = 0
+ @classmethod
+ def cntrlCount(cls):
+        # Use CntrlCount not class since we need global count
+ CntrlBase._cntrls += 1
+ return CntrlBase._cntrls - 1
+
+ _version = 0
+ @classmethod
+ def versionCount(cls):
+ cls._version += 1 # Use count for this particular type
+ return cls._version - 1
+
+class L1DCache(RubyCache):
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.l1d_size)
+ self.assoc = options.l1d_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L1ICache(RubyCache):
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.l1i_size)
+ self.assoc = options.l1i_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L2Cache(RubyCache):
+ resourceStalls = False
+ def create(self, options):
+ self.size = MemorySize(options.l2_size)
+ self.assoc = options.l2_assoc
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class CPCntrl(CorePair_Controller, CntrlBase):
+
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.L1Icache = L1ICache()
+ self.L1Icache.create(options)
+ self.L1D0cache = L1DCache()
+ self.L1D0cache.create(options)
+ self.L1D1cache = L1DCache()
+ self.L1D1cache.create(options)
+ self.L2cache = L2Cache()
+ self.L2cache.create(options)
+
+ self.sequencer = RubySequencer()
+ self.sequencer.icache_hit_latency = 2
+ self.sequencer.dcache_hit_latency = 2
+ self.sequencer.version = self.seqCount()
+ self.sequencer.icache = self.L1Icache
+ self.sequencer.dcache = self.L1D0cache
+ self.sequencer.ruby_system = ruby_system
+ self.sequencer.coreid = 0
+ self.sequencer.is_cpu_sequencer = True
+
+ self.sequencer1 = RubySequencer()
+ self.sequencer1.version = self.seqCount()
+ self.sequencer1.icache = self.L1Icache
+ self.sequencer1.dcache = self.L1D1cache
+ self.sequencer1.icache_hit_latency = 2
+ self.sequencer1.dcache_hit_latency = 2
+ self.sequencer1.ruby_system = ruby_system
+ self.sequencer1.coreid = 1
+ self.sequencer1.is_cpu_sequencer = True
+
+ self.issue_latency = options.cpu_to_dir_latency
+ self.send_evictions = send_evicts(options)
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+class L3Cache(RubyCache):
+ assoc = 8
+ dataArrayBanks = 256
+ tagArrayBanks = 256
+
+ def create(self, options, ruby_system, system):
+ self.size = MemorySize(options.l3_size)
+ self.size.value /= options.num_dirs
+        self.dataArrayBanks /= options.num_dirs
+        self.tagArrayBanks /= options.num_dirs
+ self.dataAccessLatency = options.l3_data_latency
+ self.tagAccessLatency = options.l3_tag_latency
+ self.resourceStalls = options.no_resource_stalls
+ self.replacement_policy = PseudoLRUReplacementPolicy()
+
+class L3Cntrl(L3Cache_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+ self.L3cache = L3Cache()
+ self.L3cache.create(options, ruby_system, system)
+
+ self.l3_response_latency = max(self.L3cache.dataAccessLatency,
+ self.L3cache.tagAccessLatency)
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+class DirMem(RubyDirectoryMemory, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ phys_mem_size = AddrRange(options.mem_size).size()
+ mem_module_size = phys_mem_size / options.num_dirs
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+ self.size = dir_size
+
+class DirCntrl(Directory_Controller, CntrlBase):
+ def create(self, options, ruby_system, system):
+ self.version = self.versionCount()
+
+ self.response_latency = 30
+
+ self.directory = DirMem()
+ self.directory.create(options, ruby_system, system)
+
+ self.L3CacheMemory = L3Cache()
+ self.L3CacheMemory.create(options, ruby_system, system)
+
+ self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
+ self.L3CacheMemory.tagAccessLatency)
+
+ self.number_of_TBEs = options.num_tbes
+
+ self.ruby_system = ruby_system
+
+ if options.recycle_latency:
+ self.recycle_latency = options.recycle_latency
+
+ self.CPUonly = True
+
+ def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
+ req_to_l3, probe_to_l3, resp_to_l3):
+ self.reqToDir = req_to_dir
+ self.respToDir = resp_to_dir
+ self.l3UnblockToDir = l3_unblock_to_dir
+ self.reqToL3 = req_to_l3
+ self.probeToL3 = probe_to_l3
+ self.respToL3 = resp_to_l3
+
+def define_options(parser):
+ parser.add_option("--num-subcaches", type="int", default=4)
+ parser.add_option("--l3-data-latency", type="int", default=20)
+ parser.add_option("--l3-tag-latency", type="int", default=15)
+ parser.add_option("--cpu-to-dir-latency", type="int", default=15)
+ parser.add_option("--no-resource-stalls", action="store_false",
+ default=True)
+ parser.add_option("--num-tbes", type="int", default=256)
+ parser.add_option("--l2-latency", type="int", default=50) # load to use
+
+def create_system(options, full_system, system, dma_devices, ruby_system):
+ if buildEnv['PROTOCOL'] != 'MOESI_AMD_Base':
+ panic("This script requires the MOESI_AMD_Base protocol.")
+
+ cpu_sequencers = []
+
+ #
+ # The ruby network creation expects the list of nodes in the system to
+ # be consistent with the NetDest list. Therefore the l1 controller
+ # nodes must be listed before the directory nodes and directory nodes
+ # before dma nodes, etc.
+ #
+ l1_cntrl_nodes = []
+ l3_cntrl_nodes = []
+ dir_cntrl_nodes = []
+
+ control_count = 0
+
+ #
+ # Must create the individual controllers before the network to ensure
+ # the controller constructors are called before the network constructor
+ #
+
+ # This is the base crossbar that connects the L3s, Dirs, and cpu
+ # Cluster
+ mainCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
+ for i in xrange(options.num_dirs):
+
+ dir_cntrl = DirCntrl(TCC_select_num_bits = 0)
+ dir_cntrl.create(options, ruby_system, system)
+
+ # Connect the Directory controller to the ruby network
+ dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
+ dir_cntrl.requestFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.responseFromCores = MessageBuffer()
+ dir_cntrl.responseFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.unblockFromCores = MessageBuffer()
+ dir_cntrl.unblockFromCores.slave = ruby_system.network.master
+
+ dir_cntrl.probeToCore = MessageBuffer()
+ dir_cntrl.probeToCore.master = ruby_system.network.slave
+
+ dir_cntrl.responseToCore = MessageBuffer()
+ dir_cntrl.responseToCore.master = ruby_system.network.slave
+
+ dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
+ dir_cntrl.responseFromMemory = MessageBuffer()
+
+ exec("system.dir_cntrl%d = dir_cntrl" % i)
+ dir_cntrl_nodes.append(dir_cntrl)
+
+ mainCluster.add(dir_cntrl)
+
+ # Technically this config can support an odd number of cpus, but the top
+ # level config files, such as the ruby_random_tester, will get confused if
+ # the number of cpus does not equal the number of sequencers. Thus make
+ # sure that an even number of cpus is specified.
+ assert((options.num_cpus % 2) == 0)
+
+ # For an odd number of CPUs, still create the right number of controllers
+ cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
+ for i in xrange((options.num_cpus + 1) / 2):
+
+ cp_cntrl = CPCntrl()
+ cp_cntrl.create(options, ruby_system, system)
+
+ exec("system.cp_cntrl%d = cp_cntrl" % i)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
+
+ # Connect the CP controllers and the network
+ cp_cntrl.requestFromCore = MessageBuffer()
+ cp_cntrl.requestFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.responseFromCore = MessageBuffer()
+ cp_cntrl.responseFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.unblockFromCore = MessageBuffer()
+ cp_cntrl.unblockFromCore.master = ruby_system.network.slave
+
+ cp_cntrl.probeToCore = MessageBuffer()
+ cp_cntrl.probeToCore.slave = ruby_system.network.master
+
+ cp_cntrl.responseToCore = MessageBuffer()
+ cp_cntrl.responseToCore.slave = ruby_system.network.master
+
+ cp_cntrl.mandatoryQueue = MessageBuffer()
+ cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
+
+ cpuCluster.add(cp_cntrl)
+
+ # Assuming no DMA devices
+ assert(len(dma_devices) == 0)
+
+ # Add cpu/gpu clusters to main cluster
+ mainCluster.add(cpuCluster)
+
+ ruby_system.network.number_of_virtual_networks = 10
+
+ return (cpu_sequencers, dir_cntrl_nodes, mainCluster)