Diffstat (limited to 'configs/example')
-rw-r--r--  configs/example/apu_se.py                 26
-rw-r--r--  configs/example/fs.py                      6
-rw-r--r--  configs/example/garnet_synth_traffic.py    5
-rw-r--r--  configs/example/hmctest.py                 8
-rw-r--r--  configs/example/memcheck.py                8
-rw-r--r--  configs/example/memtest.py                 6
-rw-r--r--  configs/example/read_config.py             2
-rw-r--r--  configs/example/ruby_gpu_random_test.py    6
-rw-r--r--  configs/example/ruby_mem_test.py           8
-rw-r--r--  configs/example/ruby_random_test.py        4
-rw-r--r--  configs/example/se.py                      6
11 files changed, 46 insertions, 39 deletions
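
This changeset ports the gem5 example configs from Python 2 idioms to Python 3: every xrange() call becomes range(), execfile() is replaced with an exec(compile(...)) equivalent, and one integer division becomes explicit floor division. As a minimal sketch (the value and names below are illustrative, not taken from the configs), the rename is safe because Python 3's range() is a lazy sequence, matching the memory behaviour of Python 2's xrange():

    # Minimal sketch; n_cu and the CU names are illustrative only.
    # Python 2: range() built a full list, xrange() yielded lazily.
    # Python 3: range() is itself lazy, so the plain rename keeps
    # the old xrange() memory behaviour.
    n_cu = 4
    compute_units = []
    for i in range(n_cu):          # was: for i in xrange(n_cu)
        compute_units.append("CU%d" % i)
    print(compute_units)           # ['CU0', 'CU1', 'CU2', 'CU3']
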
diff --git a/configs/example/apu_se.py b/configs/example/apu_se.py
index bba0d0fad..146863d62 100644
--- a/configs/example/apu_se.py
+++ b/configs/example/apu_se.py
@@ -225,7 +225,7 @@ if options.TLB_config == "perLane":
# List of compute units; one GPU can have multiple compute units
compute_units = []
-for i in xrange(n_cu):
+for i in range(n_cu):
compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
num_SIMDs = options.simds_per_cu,
wfSize = options.wf_size,
@@ -255,8 +255,8 @@ for i in xrange(n_cu):
options.outOfOrderDataDelivery))
wavefronts = []
vrfs = []
- for j in xrange(options.simds_per_cu):
- for k in xrange(shader.n_wf):
+ for j in range(options.simds_per_cu):
+ for k in range(shader.n_wf):
wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
wfSize = options.wf_size))
vrfs.append(VectorRegisterFile(simd_id=j,
@@ -311,7 +311,7 @@ if fast_forward:
future_cpu_list = []
# Initial CPUs to be used during fast-forwarding.
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
cpu = CpuClass(cpu_id = i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
@@ -328,7 +328,7 @@ else:
MainCpuClass = CpuClass
# CPs to be used throughout the simulation.
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
cp = MainCpuClass(cpu_id = options.num_cpus + i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
@@ -337,7 +337,7 @@ for i in xrange(options.num_cp):
cp_list.append(cp)
# Main CPUs (to be used after fast-forwarding if fast-forwarding is specified).
-for i in xrange(options.num_cpus):
+for i in range(options.num_cpus):
cpu = MainCpuClass(cpu_id = i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
@@ -400,7 +400,7 @@ for cp in cp_list:
cp.workload = host_cpu.workload
if fast_forward:
- for i in xrange(len(future_cpu_list)):
+ for i in range(len(future_cpu_list)):
future_cpu_list[i].workload = cpu_list[i].workload
future_cpu_list[i].createThreads()
@@ -408,7 +408,7 @@ if fast_forward:
# List of CPUs that must be switched when moving between KVM and simulation
if fast_forward:
switch_cpu_list = \
- [(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)]
+ [(cpu_list[i], future_cpu_list[i]) for i in range(options.num_cpus)]
# Full list of processing cores in the system. Note that
# dispatcher is also added to cpu_list although it is
@@ -431,7 +431,7 @@ if fast_forward:
have_kvm_support = 'BaseKvmCPU' in globals()
if have_kvm_support and buildEnv['TARGET_ISA'] == "x86":
system.vm = KvmVM()
- for i in xrange(len(host_cpu.workload)):
+ for i in range(len(host_cpu.workload)):
host_cpu.workload[i].useArchPT = True
host_cpu.workload[i].kvmInSE = True
else:
@@ -479,15 +479,15 @@ gpu_port_idx = len(system.ruby._cpu_ports) \
gpu_port_idx = gpu_port_idx - options.num_cp * 2
wavefront_size = options.wf_size
-for i in xrange(n_cu):
+for i in range(n_cu):
# The pipeline issues wavefront_size number of uncoalesced requests
# in one GPU issue cycle. Hence wavefront_size mem ports.
- for j in xrange(wavefront_size):
+ for j in range(wavefront_size):
system.cpu[shader_idx].CUs[i].memory_port[j] = \
system.ruby._cpu_ports[gpu_port_idx].slave[j]
gpu_port_idx += 1
-for i in xrange(n_cu):
+for i in range(n_cu):
if i > 0 and not i % options.cu_per_sqc:
print("incrementing idx on ", i)
gpu_port_idx += 1
@@ -496,7 +496,7 @@ for i in xrange(n_cu):
gpu_port_idx = gpu_port_idx + 1
# attach CP ports to Ruby
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
system.cpu[cp_idx].createInterruptController()
system.cpu[cp_idx].dcache_port = \
system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
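
The apu_se.py hunks keep the index-based loops (for i in range(len(future_cpu_list))) rather than restructuring them; a sketch, using illustrative stand-in lists, of the equivalent and slightly more idiomatic zip() pairing:

    # Illustrative stand-ins; the real lists hold gem5 CPU objects.
    cpu_list = ["host_cpu0", "host_cpu1"]
    future_cpu_list = ["main_cpu0", "main_cpu1"]

    # Index-based form, as kept in the diff:
    for i in range(len(future_cpu_list)):
        print(cpu_list[i], "->", future_cpu_list[i])

    # Equivalent pairing without explicit indices:
    for cpu, future_cpu in zip(cpu_list, future_cpu_list):
        print(cpu, "->", future_cpu)
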
diff --git a/configs/example/fs.py b/configs/example/fs.py
index 6be9ba2c2..70275a0f6 100644
--- a/configs/example/fs.py
+++ b/configs/example/fs.py
@@ -138,7 +138,7 @@ def build_test_system(np):
# For now, assign all the CPUs to the same clock domain
test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
- for i in xrange(np)]
+ for i in range(np)]
if CpuConfig.is_kvm_cpu(TestCPUClass) or CpuConfig.is_kvm_cpu(FutureClass):
test_sys.kvm_vm = KvmVM()
@@ -194,7 +194,7 @@ def build_test_system(np):
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
- for i in xrange(np):
+ for i in range(np):
if options.simpoint_profile:
test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker:
@@ -277,7 +277,7 @@ def build_drive_system(np):
# memory bus
drive_sys.mem_ctrls = [DriveMemClass(range = r)
for r in drive_sys.mem_ranges]
- for i in xrange(len(drive_sys.mem_ctrls)):
+ for i in range(len(drive_sys.mem_ctrls)):
drive_sys.mem_ctrls[i].port = drive_sys.membus.master
drive_sys.init_param = options.init_param
diff --git a/configs/example/garnet_synth_traffic.py b/configs/example/garnet_synth_traffic.py
index 92fb3a047..f5b7690de 100644
--- a/configs/example/garnet_synth_traffic.py
+++ b/configs/example/garnet_synth_traffic.py
@@ -87,7 +87,8 @@ parser.add_option("--inj-vnet", type="int", default=-1,
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile(open(os.path.join(config_root, "common", "Options.py")).read(),
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
@@ -112,7 +113,7 @@ cpus = [ GarnetSyntheticTraffic(
inj_vnet=options.inj_vnet,
precision=options.precision,
num_dest=options.num_dirs) \
- for i in xrange(options.num_cpus) ]
+ for i in range(options.num_cpus) ]
# create the desired simulated system
system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
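
The execfile() builtin no longer exists in Python 3, so the Options.py include above is rewritten as the usual exec(compile(open(...).read(), filename, 'exec')) idiom; compiling with the filename keeps tracebacks pointing at Options.py. A self-contained sketch of the same pattern (the path is illustrative):

    import os

    path = os.path.join("configs", "common", "Options.py")  # illustrative path
    # Python 2: execfile(path)
    # Python 3 equivalent; a with-block also closes the file,
    # unlike the one-line form in the diff:
    with open(path) as f:
        exec(compile(f.read(), path, 'exec'))
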
diff --git a/configs/example/hmctest.py b/configs/example/hmctest.py
index c370d0a84..091ed8b03 100644
--- a/configs/example/hmctest.py
+++ b/configs/example/hmctest.py
@@ -57,17 +57,17 @@ def build_system(options):
system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd)
# add traffic generators to the system
system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in
- xrange(options.num_tgen)]
+ range(options.num_tgen)]
# Config memory system with given HMC arch
MemConfig.config_mem(options, system)
# Connect the traffic generatiors
if options.arch == "distributed":
- for i in xrange(options.num_tgen):
+ for i in range(options.num_tgen):
system.tgen[i].port = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
if options.arch == "mixed":
- for i in xrange(int(options.num_tgen/2)):
+ for i in range(int(options.num_tgen/2)):
system.tgen[i].port = system.membus.slave
hh = system.hmc_host
if options.enable_global_monitor:
@@ -82,7 +82,7 @@ def build_system(options):
system.system_port = system.membus.slave
if options.arch == "same":
hh = system.hmc_host
- for i in xrange(options.num_links_controllers):
+ for i in range(options.num_links_controllers):
if options.enable_global_monitor:
system.tgen[i].port = hh.lmonitor[i].slave
else:
diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py
index c2eed1959..1dae86fc3 100644
--- a/configs/example/memcheck.py
+++ b/configs/example/memcheck.py
@@ -246,9 +246,9 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
# The levels are indexing backwards through the list
ntesters = testerspec[len(cachespec) - level]
- testers = [proto_tester() for i in xrange(ntesters)]
+ testers = [proto_tester() for i in range(ntesters)]
checkers = [MemCheckerMonitor(memchecker = system.memchecker) \
- for i in xrange(ntesters)]
+ for i in range(ntesters)]
if ntesters:
subsys.tester = testers
subsys.checkers = checkers
@@ -264,8 +264,8 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
# Create and connect the caches, both the ones fanning out
# to create the tree, and the ones used to connect testers
# on this level
- tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
- tester_caches = [proto_l1() for i in xrange(ntesters)]
+ tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+ tester_caches = [proto_l1() for i in range(ntesters)]
subsys.cache = tester_caches + tree_caches
for cache in tree_caches:
diff --git a/configs/example/memtest.py b/configs/example/memtest.py
index d293164ce..81c826a41 100644
--- a/configs/example/memtest.py
+++ b/configs/example/memtest.py
@@ -257,7 +257,7 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
limit = (len(cachespec) - level + 1) * 100000000
testers = [proto_tester(interval = 10 * (level * level + 1),
progress_check = limit) \
- for i in xrange(ntesters)]
+ for i in range(ntesters)]
if ntesters:
subsys.tester = testers
@@ -272,8 +272,8 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
# Create and connect the caches, both the ones fanning out
# to create the tree, and the ones used to connect testers
# on this level
- tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
- tester_caches = [proto_l1() for i in xrange(ntesters)]
+ tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+ tester_caches = [proto_l1() for i in range(ntesters)]
subsys.cache = tester_caches + tree_caches
for cache in tree_caches:
diff --git a/configs/example/read_config.py b/configs/example/read_config.py
index 3c17d4b9c..0d60ec4cb 100644
--- a/configs/example/read_config.py
+++ b/configs/example/read_config.py
@@ -280,7 +280,7 @@ class ConfigManager(object):
# Assume that unnamed ports are unconnected
peers = self.config.get_port_peers(object_name, port_name)
- for index, peer in zip(xrange(0, len(peers)), peers):
+ for index, peer in zip(range(0, len(peers)), peers):
parsed_ports.append((
PortConnection(object_name, port.name, index),
PortConnection.from_string(peer)))
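
The zip(range(0, len(peers)), peers) form above is a direct translation of the old xrange() call; enumerate(peers) would yield the same (index, peer) pairs. A small sketch with an illustrative peer list:

    peers = ["system.membus.slave[0]", "system.membus.slave[1]"]  # illustrative
    # Direct translation, as in the diff:
    for index, peer in zip(range(0, len(peers)), peers):
        print(index, peer)
    # Same pairs, more idiomatic:
    for index, peer in enumerate(peers):
        print(index, peer)
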
diff --git a/configs/example/ruby_gpu_random_test.py b/configs/example/ruby_gpu_random_test.py
index 162d3ff4f..175717701 100644
--- a/configs/example/ruby_gpu_random_test.py
+++ b/configs/example/ruby_gpu_random_test.py
@@ -76,7 +76,9 @@ parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+ open(os.path.join(config_root, "common", "Options.py")).read(), \
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
@@ -97,7 +99,7 @@ options.l3_assoc=2
assert(options.num_compute_units >= 1)
n_cu = options.num_compute_units
-options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc)
+options.num_sqc = int((n_cu + options.cu_per_sqc - 1) // options.cu_per_sqc)
if args:
print("Error: script doesn't take any positional arguments")
diff --git a/configs/example/ruby_mem_test.py b/configs/example/ruby_mem_test.py
index 68ad1ca66..880a150cd 100644
--- a/configs/example/ruby_mem_test.py
+++ b/configs/example/ruby_mem_test.py
@@ -65,7 +65,9 @@ parser.add_option("--suppress-func-warnings", action="store_true",
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+ open(os.path.join(config_root, "common", "Options.py")).read(), \
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
@@ -101,7 +103,7 @@ cpus = [ MemTest(max_loads = options.maxloads,
percent_uncacheable = 0,
progress_interval = options.progress,
suppress_func_warnings = options.suppress_func_warnings) \
- for i in xrange(options.num_cpus) ]
+ for i in range(options.num_cpus) ]
system = System(cpu = cpus,
clk_domain = SrcClockDomain(clock = options.sys_clock),
@@ -114,7 +116,7 @@ if options.num_dmas > 0:
progress_interval = options.progress,
suppress_func_warnings =
not options.suppress_func_warnings) \
- for i in xrange(options.num_dmas) ]
+ for i in range(options.num_dmas) ]
system.dma_devices = dmas
else:
dmas = []
diff --git a/configs/example/ruby_random_test.py b/configs/example/ruby_random_test.py
index d6b53cf3e..15d474cec 100644
--- a/configs/example/ruby_random_test.py
+++ b/configs/example/ruby_random_test.py
@@ -59,7 +59,9 @@ parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+ open(os.path.join(config_root, "common", "Options.py")).read(), \
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
diff --git a/configs/example/se.py b/configs/example/se.py
index fa9e89745..59af888e0 100644
--- a/configs/example/se.py
+++ b/configs/example/se.py
@@ -171,7 +171,7 @@ if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
-system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
+system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
@@ -220,7 +220,7 @@ if options.simpoint_profile:
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
-for i in xrange(np):
+for i in range(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
@@ -246,7 +246,7 @@ if options.ruby:
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
- for i in xrange(np):
+ for i in range(np):
ruby_port = system.ruby._cpu_ports[i]
# Create the interrupt controller and connect its ports to Ruby