summaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorKevin Lim <ktlim@umich.edu>2006-10-09 22:59:56 -0400
committerKevin Lim <ktlim@umich.edu>2006-10-09 22:59:56 -0400
commitbdde892d668e17fb5a67de0e560a85b9092adf9e (patch)
tree3876a98dcd7f80aca7bf7e2153dbaa32c83a15b5 /tests
parenta9ae6c8656dc233996c81cdeb6f5c8539442af95 (diff)
parent5448517da4cd13e3c8438850f04367d9614d686b (diff)
downloadgem5-bdde892d668e17fb5a67de0e560a85b9092adf9e.tar.xz
Merge ktlim@zizzer:/bk/newmem
into zamp.eecs.umich.edu:/z/ktlim2/clean/o3-merge/newmem src/cpu/memtest/memtest.cc: src/cpu/memtest/memtest.hh: src/cpu/simple/timing.hh: tests/configs/o3-timing-mp.py: Hand merge. --HG-- extra : convert_revision : a58cc439eb5e8f900d175ed8b5a85b6c8723e558
Diffstat (limited to 'tests')
-rw-r--r--tests/configs/memtest.py94
-rw-r--r--tests/configs/o3-timing-mp.py4
-rw-r--r--tests/configs/simple-atomic-mp.py4
-rw-r--r--tests/configs/simple-atomic.py2
-rw-r--r--tests/configs/simple-timing-mp.py2
-rw-r--r--tests/configs/simple-timing.py2
-rw-r--r--tests/configs/tsunami-simple-atomic-dual.py2
-rw-r--r--tests/configs/tsunami-simple-atomic.py2
-rw-r--r--tests/configs/tsunami-simple-timing-dual.py2
-rw-r--r--tests/configs/tsunami-simple-timing.py2
-rw-r--r--tests/quick/00.hello.mp/test.py44
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt28
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr9
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt28
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout4
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-atomic/config.ini11
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-atomic/config.out14
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-atomic/stdout6
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-timing/config.ini13
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-timing/config.out16
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-timing/m5stats.txt36
-rw-r--r--tests/quick/00.hello/ref/mips/linux/simple-timing/stdout6
-rw-r--r--tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.ini9
-rw-r--r--tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.out12
-rw-r--r--tests/quick/00.hello/ref/sparc/linux/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/sparc/linux/simple-atomic/stdout4
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt60
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr18
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout4
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini4
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out4
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console54
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt400
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console54
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt144
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini4
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out4
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console54
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt411
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console54
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt142
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout8
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini1
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out1
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt6
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout4
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini1
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out1
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt28
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout4
-rw-r--r--tests/quick/50.memtest/test.py28
78 files changed, 1114 insertions, 847 deletions
diff --git a/tests/configs/memtest.py b/tests/configs/memtest.py
new file mode 100644
index 000000000..c5cd0246d
--- /dev/null
+++ b/tests/configs/memtest.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Ron Dreslinski
+
+import m5
+from m5.objects import *
+
+# --------------------
+# Base L1 Cache
+# ====================
+
+class L1(BaseCache):
+ latency = 1
+ block_size = 64
+ mshrs = 4
+ tgts_per_mshr = 8
+ protocol = CoherenceProtocol(protocol='moesi')
+
+# ----------------------
+# Base L2 Cache
+# ----------------------
+
+class L2(BaseCache):
+ block_size = 64
+ latency = 100
+ mshrs = 92
+ tgts_per_mshr = 16
+ write_buffers = 8
+
+#MAX CORES IS 8 with the false sharing method
+nb_cores = 8
+cpus = [ MemTest(max_loads=1e12) for i in xrange(nb_cores) ]
+
+# system simulated
+system = System(cpu = cpus, funcmem = PhysicalMemory(),
+ physmem = PhysicalMemory(), membus = Bus())
+
+# l2cache & bus
+system.toL2Bus = Bus()
+system.l2c = L2(size='4MB', assoc=8)
+system.l2c.cpu_side = system.toL2Bus.port
+
+# connect l2c to membus
+system.l2c.mem_side = system.membus.port
+
+which_port = 0
+# add L1 caches
+for cpu in cpus:
+ cpu.l1c = L1(size = '32kB', assoc = 4)
+ cpu.l1c.cpu_side = cpu.test
+ cpu.l1c.mem_side = system.toL2Bus.port
+ if which_port == 0:
+ system.funcmem.port = cpu.functional
+ which_port = 1
+ else:
+ system.funcmem.functional = cpu.functional
+
+
+# connect memory to membus
+system.physmem.port = system.membus.port
+
+
+# -----------------------
+# run simulation
+# -----------------------
+
+root = Root( system = system )
+root.system.mem_mode = 'timing'
+#root.trace.flags="InstExec"
+root.trace.flags="Bus"
diff --git a/tests/configs/o3-timing-mp.py b/tests/configs/o3-timing-mp.py
index 09935d574..68631b3d2 100644
--- a/tests/configs/o3-timing-mp.py
+++ b/tests/configs/o3-timing-mp.py
@@ -53,7 +53,7 @@ class L2(BaseCache):
write_buffers = 8
nb_cores = 4
-cpus = [ DerivO3CPU() for i in xrange(nb_cores) ]
+cpus = [ DerivO3CPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
system = System(cpu = cpus, physmem = PhysicalMemory(), membus =
@@ -85,5 +85,5 @@ system.physmem.port = system.membus.port
root = Root( system = system )
root.system.mem_mode = 'timing'
-root.trace.flags="Bus Cache"
+#root.trace.flags="Bus Cache"
#root.trace.flags = "BusAddrRanges"
diff --git a/tests/configs/simple-atomic-mp.py b/tests/configs/simple-atomic-mp.py
index cc1a36dda..eaa6ec66e 100644
--- a/tests/configs/simple-atomic-mp.py
+++ b/tests/configs/simple-atomic-mp.py
@@ -52,10 +52,10 @@ class L2(BaseCache):
write_buffers = 8
nb_cores = 4
-cpus = [ AtomicSimpleCPU() for i in xrange(nb_cores) ]
+cpus = [ AtomicSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
-system = System(cpu = cpus, physmem = PhysicalMemory(), membus =
+system = System(cpu = cpus, physmem = PhysicalMemory(range = AddrRange('1024MB')), membus =
Bus())
# l2cache & bus
diff --git a/tests/configs/simple-atomic.py b/tests/configs/simple-atomic.py
index 2bf67f3b1..d35ac4ae0 100644
--- a/tests/configs/simple-atomic.py
+++ b/tests/configs/simple-atomic.py
@@ -29,7 +29,7 @@
import m5
from m5.objects import *
-system = System(cpu = AtomicSimpleCPU(),
+system = System(cpu = AtomicSimpleCPU(cpu_id=0),
physmem = PhysicalMemory(),
membus = Bus())
system.physmem.port = system.membus.port
diff --git a/tests/configs/simple-timing-mp.py b/tests/configs/simple-timing-mp.py
index 9fc5f3874..8f9ab0dde 100644
--- a/tests/configs/simple-timing-mp.py
+++ b/tests/configs/simple-timing-mp.py
@@ -52,7 +52,7 @@ class L2(BaseCache):
write_buffers = 8
nb_cores = 4
-cpus = [ TimingSimpleCPU() for i in xrange(nb_cores) ]
+cpus = [ TimingSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
system = System(cpu = cpus, physmem = PhysicalMemory(), membus =
diff --git a/tests/configs/simple-timing.py b/tests/configs/simple-timing.py
index 7bb76db0e..60190b47c 100644
--- a/tests/configs/simple-timing.py
+++ b/tests/configs/simple-timing.py
@@ -36,7 +36,7 @@ class MyCache(BaseCache):
mshrs = 10
tgts_per_mshr = 5
-cpu = TimingSimpleCPU()
+cpu = TimingSimpleCPU(cpu_id=0)
cpu.addTwoLevelCacheHierarchy(MyCache(size = '128kB'), MyCache(size = '256kB'),
MyCache(size = '2MB'))
cpu.mem = cpu.dcache
diff --git a/tests/configs/tsunami-simple-atomic-dual.py b/tests/configs/tsunami-simple-atomic-dual.py
index e3945f7dc..f798213db 100644
--- a/tests/configs/tsunami-simple-atomic-dual.py
+++ b/tests/configs/tsunami-simple-atomic-dual.py
@@ -34,7 +34,7 @@ import FSConfig
AlphaConsole.cpu = Parent.cpu[0]
IntrControl.cpu = Parent.cpu[0]
-cpus = [ AtomicSimpleCPU() for i in xrange(2) ]
+cpus = [ AtomicSimpleCPU(cpu_id=i) for i in xrange(2) ]
system = FSConfig.makeLinuxAlphaSystem('atomic')
system.cpu = cpus
for c in cpus:
diff --git a/tests/configs/tsunami-simple-atomic.py b/tests/configs/tsunami-simple-atomic.py
index ca1dd5c77..623d285e4 100644
--- a/tests/configs/tsunami-simple-atomic.py
+++ b/tests/configs/tsunami-simple-atomic.py
@@ -31,7 +31,7 @@ from m5.objects import *
m5.AddToPath('../configs/common')
import FSConfig
-cpu = AtomicSimpleCPU()
+cpu = AtomicSimpleCPU(cpu_id=0)
system = FSConfig.makeLinuxAlphaSystem('atomic')
system.cpu = cpu
cpu.connectMemPorts(system.membus)
diff --git a/tests/configs/tsunami-simple-timing-dual.py b/tests/configs/tsunami-simple-timing-dual.py
index 967d6a2d2..bf94214fd 100644
--- a/tests/configs/tsunami-simple-timing-dual.py
+++ b/tests/configs/tsunami-simple-timing-dual.py
@@ -34,7 +34,7 @@ import FSConfig
AlphaConsole.cpu = Parent.cpu[0]
IntrControl.cpu = Parent.cpu[0]
-cpus = [ TimingSimpleCPU() for i in xrange(2) ]
+cpus = [ TimingSimpleCPU(cpu_id=i) for i in xrange(2) ]
system = FSConfig.makeLinuxAlphaSystem('timing')
system.cpu = cpus
for c in cpus:
diff --git a/tests/configs/tsunami-simple-timing.py b/tests/configs/tsunami-simple-timing.py
index b3fc9d105..2edf5ac32 100644
--- a/tests/configs/tsunami-simple-timing.py
+++ b/tests/configs/tsunami-simple-timing.py
@@ -31,7 +31,7 @@ from m5.objects import *
m5.AddToPath('../configs/common')
import FSConfig
-cpu = TimingSimpleCPU()
+cpu = TimingSimpleCPU(cpu_id=0)
system = FSConfig.makeLinuxAlphaSystem('timing')
system.cpu = cpu
cpu.connectMemPorts(system.membus)
diff --git a/tests/quick/00.hello.mp/test.py b/tests/quick/00.hello.mp/test.py
new file mode 100644
index 000000000..91fbfb7ed
--- /dev/null
+++ b/tests/quick/00.hello.mp/test.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Ron Dreslinski
+
+# workload
+benchmarks = [
+ "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+ "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+ "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+ "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+ ]
+
+for i, cpu in zip(range(len(cpus)), root.system.cpu):
+ p = LiveProcess()
+ p.executable = benchmarks[i*2]
+ p.cmd = benchmarks[(i*2)+1]
+ root.system.cpu[i].workload = p
+ root.system.cpu[i].max_insts_all_threads = 10000000
+#root.system.cpu.workload = LiveProcess(cmd = 'hello',
+ # executable = binpath('hello'))
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt
index c2c9affca..59cda42d9 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt
@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 420 # Nu
global.BPredUnit.condPredicted 1302 # Number of conditional branches predicted
global.BPredUnit.lookups 2254 # Number of BP lookups
global.BPredUnit.usedRAS 291 # Number of times the RAS was used to get a target.
-host_inst_rate 47059 # Simulator instruction rate (inst/s)
-host_mem_usage 160380 # Number of bytes of host memory used
+host_inst_rate 46995 # Simulator instruction rate (inst/s)
+host_mem_usage 160420 # Number of bytes of host memory used
host_seconds 0.12 # Real time elapsed on the host
-host_tick_rate 57322 # Simulator tick rate (ticks/s)
+host_tick_rate 57256 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 12 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 259 # Number of conflicting stores.
memdepunit.memDep.insertedLoads 2049 # Number of loads inserted to the mem dependence unit.
@@ -334,41 +334,39 @@ system.cpu.l2cache.ReadReq_misses 492 # nu
system.cpu.l2cache.ReadReq_mshr_miss_latency 492 # number of ReadReq MSHR miss cycles
system.cpu.l2cache.ReadReq_mshr_miss_rate 0.995951 # mshr miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_mshr_misses 492 # number of ReadReq MSHR misses
-system.cpu.l2cache.WriteReq_accesses 2 # number of WriteReq accesses(hits+misses)
-system.cpu.l2cache.WriteReq_hits 2 # number of WriteReq hits
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.008130 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0.004065 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 496 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 494 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 2.071138 # average overall miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency
-system.cpu.l2cache.demand_hits 4 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits 2 # number of demand (read+write) hits
system.cpu.l2cache.demand_miss_latency 1019 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.991935 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate 0.995951 # miss rate for demand accesses
system.cpu.l2cache.demand_misses 492 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
system.cpu.l2cache.demand_mshr_miss_latency 492 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.991935 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate 0.995951 # mshr miss rate for demand accesses
system.cpu.l2cache.demand_mshr_misses 492 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 496 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 494 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2.071138 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_hits 4 # number of overall hits
+system.cpu.l2cache.overall_hits 2 # number of overall hits
system.cpu.l2cache.overall_miss_latency 1019 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.991935 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate 0.995951 # miss rate for overall accesses
system.cpu.l2cache.overall_misses 492 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
system.cpu.l2cache.overall_mshr_miss_latency 492 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.991935 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate 0.995951 # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_misses 492 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
@@ -385,7 +383,7 @@ system.cpu.l2cache.replacements 0 # nu
system.cpu.l2cache.sampled_refs 492 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.tagsinuse 290.948901 # Cycle average of tags in use
-system.cpu.l2cache.total_refs 4 # Total number of references to valid blocks.
+system.cpu.l2cache.total_refs 2 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.numCycles 6869 # number of cpu cycles simulated
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr
index 8893caac8..558105896 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr
@@ -1,3 +1,12 @@
warn: Entering event queue @ 0. Starting simulation...
warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000
warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout
index 14ef519e9..f2a1151c4 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:34 2006
+M5 compiled Oct 8 2006 20:54:51
+M5 started Sun Oct 8 20:55:10 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/o3-timing tests/run.py quick/00.hello/alpha/linux/o3-timing
Exiting @ tick 6868 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini
index f7e73950d..7340cc079 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=AtomicSimpleCPU
children=workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out
index 198d7df5e..73f91ff61 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out
@@ -44,6 +44,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt
index e3cd05fb0..875e55644 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 74000 # Simulator instruction rate (inst/s)
-host_mem_usage 148088 # Number of bytes of host memory used
-host_seconds 0.08 # Real time elapsed on the host
-host_tick_rate 73591 # Simulator tick rate (ticks/s)
+host_inst_rate 172802 # Simulator instruction rate (inst/s)
+host_mem_usage 148116 # Number of bytes of host memory used
+host_seconds 0.03 # Real time elapsed on the host
+host_tick_rate 170614 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 5642 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout
index e26480539..59f571aaf 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 11:12:49
-M5 started Sat Oct 7 11:13:02 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:50 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-atomic tests/run.py quick/00.hello/alpha/linux/simple-atomic
Exiting @ tick 5641 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini
index cefcf7f11..7b517abc8 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=TimingSimpleCPU
children=dcache icache l2cache toL2Bus workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out
index 1ed18ff71..5c4c7fb14 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out
@@ -83,6 +83,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.cpu.dcache
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt
index 97d39456e..2ee3181d8 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 286207 # Simulator instruction rate (inst/s)
-host_mem_usage 159648 # Number of bytes of host memory used
+host_inst_rate 292635 # Simulator instruction rate (inst/s)
+host_mem_usage 159688 # Number of bytes of host memory used
host_seconds 0.02 # Real time elapsed on the host
-host_tick_rate 413300 # Simulator tick rate (ticks/s)
+host_tick_rate 422303 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 5642 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
@@ -153,41 +153,39 @@ system.cpu.l2cache.ReadReq_misses 441 # nu
system.cpu.l2cache.ReadReq_mshr_miss_latency 441 # number of ReadReq MSHR miss cycles
system.cpu.l2cache.ReadReq_mshr_miss_rate 0.997738 # mshr miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_mshr_misses 441 # number of ReadReq MSHR misses
-system.cpu.l2cache.WriteReq_accesses 2 # number of WriteReq accesses(hits+misses)
-system.cpu.l2cache.WriteReq_hits 2 # number of WriteReq hits
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.006803 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0.002268 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 444 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 442 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 2 # average overall miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency
-system.cpu.l2cache.demand_hits 3 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits 1 # number of demand (read+write) hits
system.cpu.l2cache.demand_miss_latency 882 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.993243 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate 0.997738 # miss rate for demand accesses
system.cpu.l2cache.demand_misses 441 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
system.cpu.l2cache.demand_mshr_miss_latency 441 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.993243 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate 0.997738 # mshr miss rate for demand accesses
system.cpu.l2cache.demand_mshr_misses 441 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 444 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 442 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_hits 3 # number of overall hits
+system.cpu.l2cache.overall_hits 1 # number of overall hits
system.cpu.l2cache.overall_miss_latency 882 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.993243 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate 0.997738 # miss rate for overall accesses
system.cpu.l2cache.overall_misses 441 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
system.cpu.l2cache.overall_mshr_miss_latency 441 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.993243 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate 0.997738 # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_misses 441 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
@@ -204,7 +202,7 @@ system.cpu.l2cache.replacements 0 # nu
system.cpu.l2cache.sampled_refs 441 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.tagsinuse 240.276061 # Cycle average of tags in use
-system.cpu.l2cache.total_refs 3 # Total number of references to valid blocks.
+system.cpu.l2cache.total_refs 1 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout b/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout
index a9c37a14d..be8eccb38 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:38 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:50 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-timing tests/run.py quick/00.hello/alpha/linux/simple-timing
Exiting @ tick 8316 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt
index 53d94a43f..41348bbfb 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt
@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 222 # Nu
global.BPredUnit.condPredicted 441 # Number of conditional branches predicted
global.BPredUnit.lookups 888 # Number of BP lookups
global.BPredUnit.usedRAS 160 # Number of times the RAS was used to get a target.
-host_inst_rate 45832 # Simulator instruction rate (inst/s)
-host_mem_usage 159900 # Number of bytes of host memory used
-host_seconds 0.05 # Real time elapsed on the host
-host_tick_rate 55090 # Simulator tick rate (ticks/s)
+host_inst_rate 26386 # Simulator instruction rate (inst/s)
+host_mem_usage 159884 # Number of bytes of host memory used
+host_seconds 0.09 # Real time elapsed on the host
+host_tick_rate 31792 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 9 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 7 # Number of conflicting stores.
memdepunit.memDep.insertedLoads 675 # Number of loads inserted to the mem dependence unit.
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout
index fa94f7eb9..c51631489 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:40 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:52 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/o3-timing tests/run.py quick/00.hello/alpha/tru64/o3-timing
Exiting @ tick 2886 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini
index 34f5c0b32..f248945b1 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=AtomicSimpleCPU
children=workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out
index a474765ae..58ae0d9df 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out
@@ -44,6 +44,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt
index b120e12b9..e3f845135 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 548861 # Simulator instruction rate (inst/s)
-host_mem_usage 147820 # Number of bytes of host memory used
-host_seconds 0.01 # Real time elapsed on the host
-host_tick_rate 504404 # Simulator tick rate (ticks/s)
+host_inst_rate 60702 # Simulator instruction rate (inst/s)
+host_mem_usage 147692 # Number of bytes of host memory used
+host_seconds 0.04 # Real time elapsed on the host
+host_tick_rate 60102 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 2578 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout
index 0c9b00960..2ee4e0a08 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 11:12:49
-M5 started Sat Oct 7 11:13:09 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:54 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/simple-atomic tests/run.py quick/00.hello/alpha/tru64/simple-atomic
Exiting @ tick 2577 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini
index 0d7d34e64..5616cf909 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=TimingSimpleCPU
children=dcache icache l2cache toL2Bus workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out
index 9b44f8ddd..c76e14e2c 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out
@@ -83,6 +83,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.cpu.dcache
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt
index 916f9dad8..39ef8ead8 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 196989 # Simulator instruction rate (inst/s)
-host_mem_usage 159172 # Number of bytes of host memory used
-host_seconds 0.01 # Real time elapsed on the host
-host_tick_rate 279840 # Simulator tick rate (ticks/s)
+host_inst_rate 69262 # Simulator instruction rate (inst/s)
+host_mem_usage 159156 # Number of bytes of host memory used
+host_seconds 0.04 # Real time elapsed on the host
+host_tick_rate 100319 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 2578 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout
index d152dc89c..27e317357 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:45 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:54 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/simple-timing tests/run.py quick/00.hello/alpha/tru64/simple-timing
Exiting @ tick 3777 because target called exit()
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.ini b/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.ini
index 2c82b8c1a..59cadaa12 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.ini
+++ b/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=AtomicSimpleCPU
children=workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
@@ -64,6 +65,7 @@ max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
mem=system.physmem
+progress_interval=0
simulate_stalls=false
system=system
width=1
@@ -74,15 +76,23 @@ icache_port=system.membus.port[1]
[system.cpu.workload]
type=LiveProcess
cmd=hello
+egid=100
env=
+euid=100
executable=tests/test-progs/hello/bin/mips/linux/hello
+gid=100
input=cin
output=cout
+pid=100
+ppid=99
system=system
+uid=100
[system.membus]
type=Bus
bus_id=0
+clock=1000
+width=64
port=system.physmem.port system.cpu.icache_port system.cpu.dcache_port
[system.physmem]
@@ -94,6 +104,7 @@ port=system.membus.port[0]
[trace]
bufsize=0
+cycle=0
dump_on_exit=false
file=cout
flags=
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.out b/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.out
index 8678c0d97..064f467da 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.out
+++ b/tests/quick/00.hello/ref/mips/linux/simple-atomic/config.out
@@ -19,6 +19,8 @@ mem_mode=atomic
[system.membus]
type=Bus
bus_id=0
+clock=1000
+width=64
[system.cpu.workload]
type=LiveProcess
@@ -28,6 +30,12 @@ input=cin
output=cout
env=
system=system
+uid=100
+euid=100
+gid=100
+egid=100
+pid=100
+ppid=99
[system.cpu]
type=AtomicSimpleCPU
@@ -35,8 +43,10 @@ max_insts_any_thread=0
max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
+progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
@@ -48,6 +58,7 @@ simulate_stalls=false
[trace]
flags=
start=0
+cycle=0
bufsize=0
file=cout
dump_on_exit=false
@@ -91,3 +102,6 @@ trace_system=client
[debug]
break_cycles=
+[statsreset]
+reset_cycle=0
+
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-atomic/m5stats.txt b/tests/quick/00.hello/ref/mips/linux/simple-atomic/m5stats.txt
index b70a6ee17..3b2a2730b 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-atomic/m5stats.txt
+++ b/tests/quick/00.hello/ref/mips/linux/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 90956 # Simulator instruction rate (inst/s)
-host_mem_usage 147380 # Number of bytes of host memory used
-host_seconds 0.06 # Real time elapsed on the host
-host_tick_rate 90353 # Simulator tick rate (ticks/s)
+host_inst_rate 52255 # Simulator instruction rate (inst/s)
+host_mem_usage 148024 # Number of bytes of host memory used
+host_seconds 0.11 # Real time elapsed on the host
+host_tick_rate 52038 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 5657 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-atomic/stdout b/tests/quick/00.hello/ref/mips/linux/simple-atomic/stdout
index f5b9c8fd7..600b178b3 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-atomic/stdout
+++ b/tests/quick/00.hello/ref/mips/linux/simple-atomic/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Sep 5 2006 15:37:09
-M5 started Tue Sep 5 15:46:32 2006
+M5 compiled Oct 9 2006 19:28:25
+M5 started Mon Oct 9 19:28:56 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/MIPS_SE/m5.opt -d build/MIPS_SE/tests/opt/quick/00.hello/mips/linux/simple-atomic tests/run.py quick/00.hello/mips/linux/simple-atomic
+command line: build/MIPS_SE/m5.debug -d build/MIPS_SE/tests/debug/quick/00.hello/mips/linux/simple-atomic tests/run.py quick/00.hello/mips/linux/simple-atomic
Exiting @ tick 5656 because target called exit()
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-timing/config.ini b/tests/quick/00.hello/ref/mips/linux/simple-timing/config.ini
index 040735f2c..8e1bb0388 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-timing/config.ini
+++ b/tests/quick/00.hello/ref/mips/linux/simple-timing/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=TimingSimpleCPU
children=dcache icache l2cache toL2Bus workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
@@ -64,6 +65,7 @@ max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
mem=system.cpu.dcache
+progress_interval=0
system=system
workload=system.cpu.workload
dcache_port=system.cpu.dcache.cpu_side
@@ -192,20 +194,30 @@ mem_side=system.membus.port[1]
[system.cpu.toL2Bus]
type=Bus
bus_id=0
+clock=1000
+width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
[system.cpu.workload]
type=LiveProcess
cmd=hello
+egid=100
env=
+euid=100
executable=tests/test-progs/hello/bin/mips/linux/hello
+gid=100
input=cin
output=cout
+pid=100
+ppid=99
system=system
+uid=100
[system.membus]
type=Bus
bus_id=0
+clock=1000
+width=64
port=system.physmem.port system.cpu.l2cache.mem_side
[system.physmem]
@@ -217,6 +229,7 @@ port=system.membus.port[0]
[trace]
bufsize=0
+cycle=0
dump_on_exit=false
file=cout
flags=
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-timing/config.out b/tests/quick/00.hello/ref/mips/linux/simple-timing/config.out
index a7270a97e..d683d2355 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-timing/config.out
+++ b/tests/quick/00.hello/ref/mips/linux/simple-timing/config.out
@@ -19,6 +19,8 @@ mem_mode=atomic
[system.membus]
type=Bus
bus_id=0
+clock=1000
+width=64
[system.cpu.dcache]
type=BaseCache
@@ -67,6 +69,12 @@ input=cin
output=cout
env=
system=system
+uid=100
+euid=100
+gid=100
+egid=100
+pid=100
+ppid=99
[system.cpu]
type=TimingSimpleCPU
@@ -74,8 +82,10 @@ max_insts_any_thread=0
max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
+progress_interval=0
mem=system.cpu.dcache
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
@@ -87,6 +97,8 @@ function_trace_start=0
[system.cpu.toL2Bus]
type=Bus
bus_id=0
+clock=1000
+width=64
[system.cpu.icache]
type=BaseCache
@@ -169,6 +181,7 @@ hit_latency=1
[trace]
flags=
start=0
+cycle=0
bufsize=0
file=cout
dump_on_exit=false
@@ -212,3 +225,6 @@ trace_system=client
[debug]
break_cycles=
+[statsreset]
+reset_cycle=0
+
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-timing/m5stats.txt b/tests/quick/00.hello/ref/mips/linux/simple-timing/m5stats.txt
index bc5ad3cca..ab86ba509 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/mips/linux/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 273933 # Simulator instruction rate (inst/s)
-host_mem_usage 159012 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
-host_tick_rate 403699 # Simulator tick rate (ticks/s)
+host_inst_rate 68704 # Simulator instruction rate (inst/s)
+host_mem_usage 166092 # Number of bytes of host memory used
+host_seconds 0.08 # Real time elapsed on the host
+host_tick_rate 103651 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 5657 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
@@ -53,7 +53,7 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 2054 # number of overall (read+write) accesses
system.cpu.dcache.overall_avg_miss_latency 3 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 2 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
system.cpu.dcache.overall_hits 1922 # number of overall hits
system.cpu.dcache.overall_miss_latency 396 # number of overall miss cycles
system.cpu.dcache.overall_miss_rate 0.064265 # miss rate for overall accesses
@@ -115,7 +115,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 5658 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 2.993399 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 1.993399 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 5355 # number of overall hits
system.cpu.icache.overall_miss_latency 907 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.053552 # miss rate for overall accesses
@@ -153,41 +153,39 @@ system.cpu.l2cache.ReadReq_misses 433 # nu
system.cpu.l2cache.ReadReq_mshr_miss_latency 433 # number of ReadReq MSHR miss cycles
system.cpu.l2cache.ReadReq_mshr_miss_rate 0.995402 # mshr miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_mshr_misses 433 # number of ReadReq MSHR misses
-system.cpu.l2cache.WriteReq_accesses 1 # number of WriteReq accesses(hits+misses)
-system.cpu.l2cache.WriteReq_hits 1 # number of WriteReq hits
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.006928 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0.004619 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 436 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 435 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 2 # average overall miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency
-system.cpu.l2cache.demand_hits 3 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits 2 # number of demand (read+write) hits
system.cpu.l2cache.demand_miss_latency 866 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.993119 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate 0.995402 # miss rate for demand accesses
system.cpu.l2cache.demand_misses 433 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
system.cpu.l2cache.demand_mshr_miss_latency 433 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.993119 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate 0.995402 # mshr miss rate for demand accesses
system.cpu.l2cache.demand_mshr_misses 433 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 436 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 435 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_hits 3 # number of overall hits
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_hits 2 # number of overall hits
system.cpu.l2cache.overall_miss_latency 866 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.993119 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate 0.995402 # miss rate for overall accesses
system.cpu.l2cache.overall_misses 433 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
system.cpu.l2cache.overall_mshr_miss_latency 433 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.993119 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate 0.995402 # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_misses 433 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
@@ -204,7 +202,7 @@ system.cpu.l2cache.replacements 0 # nu
system.cpu.l2cache.sampled_refs 433 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.tagsinuse 226.406294 # Cycle average of tags in use
-system.cpu.l2cache.total_refs 3 # Total number of references to valid blocks.
+system.cpu.l2cache.total_refs 2 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles
diff --git a/tests/quick/00.hello/ref/mips/linux/simple-timing/stdout b/tests/quick/00.hello/ref/mips/linux/simple-timing/stdout
index 954193ee0..4acd2a2e5 100644
--- a/tests/quick/00.hello/ref/mips/linux/simple-timing/stdout
+++ b/tests/quick/00.hello/ref/mips/linux/simple-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:52:26
-M5 started Sat Oct 7 12:52:42 2006
+M5 compiled Oct 9 2006 19:28:25
+M5 started Mon Oct 9 19:28:56 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/MIPS_SE/m5.opt -d build/MIPS_SE/tests/opt/quick/00.hello/mips/linux/simple-timing tests/run.py quick/00.hello/mips/linux/simple-timing
+command line: build/MIPS_SE/m5.debug -d build/MIPS_SE/tests/debug/quick/00.hello/mips/linux/simple-timing tests/run.py quick/00.hello/mips/linux/simple-timing
Exiting @ tick 8579 because target called exit()
diff --git a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.ini b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.ini
index 082415a7f..21028fa63 100644
--- a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.ini
+++ b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=AtomicSimpleCPU
children=workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
@@ -64,6 +65,7 @@ max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
mem=system.physmem
+progress_interval=0
simulate_stalls=false
system=system
width=1
@@ -74,11 +76,17 @@ icache_port=system.membus.port[1]
[system.cpu.workload]
type=LiveProcess
cmd=hello
+egid=100
env=
+euid=100
executable=tests/test-progs/hello/bin/sparc/linux/hello
+gid=100
input=cin
output=cout
+pid=100
+ppid=99
system=system
+uid=100
[system.membus]
type=Bus
@@ -94,6 +102,7 @@ port=system.membus.port[0]
[trace]
bufsize=0
+cycle=0
dump_on_exit=false
file=cout
flags=
diff --git a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.out b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.out
index 45412a511..f5be4e3bd 100644
--- a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.out
+++ b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/config.out
@@ -28,6 +28,12 @@ input=cin
output=cout
env=
system=system
+uid=100
+euid=100
+gid=100
+egid=100
+pid=100
+ppid=99
[system.cpu]
type=AtomicSimpleCPU
@@ -35,8 +41,10 @@ max_insts_any_thread=0
max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
+progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
@@ -48,6 +56,7 @@ simulate_stalls=false
[trace]
flags=
start=0
+cycle=0
bufsize=0
file=cout
dump_on_exit=false
@@ -91,3 +100,6 @@ trace_system=client
[debug]
break_cycles=
+[statsreset]
+reset_cycle=0
+
diff --git a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/m5stats.txt b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/m5stats.txt
index 9bfb2fec9..e87e77b8f 100644
--- a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/m5stats.txt
+++ b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 61348 # Simulator instruction rate (inst/s)
-host_mem_usage 147288 # Number of bytes of host memory used
-host_seconds 0.07 # Real time elapsed on the host
-host_tick_rate 60991 # Simulator tick rate (ticks/s)
+host_inst_rate 2175 # Simulator instruction rate (inst/s)
+host_mem_usage 147292 # Number of bytes of host memory used
+host_seconds 2.06 # Real time elapsed on the host
+host_tick_rate 2174 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 4483 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/stdout b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/stdout
index 38eb82c8b..c9df3a17c 100644
--- a/tests/quick/00.hello/ref/sparc/linux/simple-atomic/stdout
+++ b/tests/quick/00.hello/ref/sparc/linux/simple-atomic/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Sep 5 2006 15:39:50
-M5 started Tue Sep 5 15:49:24 2006
+M5 compiled Oct 8 2006 14:19:59
+M5 started Sun Oct 8 14:20:03 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/SPARC_SE/m5.opt -d build/SPARC_SE/tests/opt/quick/00.hello/sparc/linux/simple-atomic tests/run.py quick/00.hello/sparc/linux/simple-atomic
Exiting @ tick 4482 because target called exit()
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt
index a249947ca..9871af3ab 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt
@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 1081 # Nu
global.BPredUnit.condPredicted 2449 # Number of conditional branches predicted
global.BPredUnit.lookups 4173 # Number of BP lookups
global.BPredUnit.usedRAS 551 # Number of times the RAS was used to get a target.
-host_inst_rate 50082 # Simulator instruction rate (inst/s)
-host_mem_usage 161260 # Number of bytes of host memory used
-host_seconds 0.22 # Real time elapsed on the host
-host_tick_rate 37535 # Simulator tick rate (ticks/s)
+host_inst_rate 48339 # Simulator instruction rate (inst/s)
+host_mem_usage 161300 # Number of bytes of host memory used
+host_seconds 0.23 # Real time elapsed on the host
+host_tick_rate 36232 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 41 # Number of conflicting loads.
memdepunit.memDep.conflictingLoads 39 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 194 # Number of conflicting stores.
@@ -115,7 +115,7 @@ system.cpu.dcache.WriteReq_mshr_miss_rate 0.088670 # m
system.cpu.dcache.WriteReq_mshr_miss_rate_0 0.088670 # mshr miss rate for WriteReq accesses
system.cpu.dcache.WriteReq_mshr_misses 144 # number of WriteReq MSHR misses
system.cpu.dcache.WriteReq_mshr_misses_0 144 # number of WriteReq MSHR misses
-system.cpu.dcache.avg_blocked_cycles_no_mshrs no value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.dcache.avg_blocked_cycles_no_targets 1 # average number of cycles each access was blocked
system.cpu.dcache.avg_refs 11.670554 # Average number of references to valid blocks.
system.cpu.dcache.blocked_no_mshrs 0 # number of cycles access was blocked
@@ -193,7 +193,7 @@ system.cpu.dcache.overall_mshr_miss_latency_0 741
system.cpu.dcache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles
system.cpu.dcache.overall_mshr_miss_rate 0.075551 # mshr miss rate for overall accesses
system.cpu.dcache.overall_mshr_miss_rate_0 0.075551 # mshr miss rate for overall accesses
-system.cpu.dcache.overall_mshr_miss_rate_1 <err: div-0> # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_miss_rate_1 no value # mshr miss rate for overall accesses
system.cpu.dcache.overall_mshr_misses 343 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_misses_0 343 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_misses_1 0 # number of overall MSHR misses
@@ -590,20 +590,16 @@ system.cpu.l2cache.ReadReq_mshr_miss_rate 0.994802 # m
system.cpu.l2cache.ReadReq_mshr_miss_rate_0 0.994802 # mshr miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_mshr_misses 957 # number of ReadReq MSHR misses
system.cpu.l2cache.ReadReq_mshr_misses_0 957 # number of ReadReq MSHR misses
-system.cpu.l2cache.WriteReq_accesses 4 # number of WriteReq accesses(hits+misses)
-system.cpu.l2cache.WriteReq_accesses_0 4 # number of WriteReq accesses(hits+misses)
-system.cpu.l2cache.WriteReq_hits 4 # number of WriteReq hits
-system.cpu.l2cache.WriteReq_hits_0 4 # number of WriteReq hits
-system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.009404 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_blocked_cycles_no_mshrs no value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_blocked_cycles_no_targets no value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_refs 0.005225 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 966 # number of demand (read+write) accesses
-system.cpu.l2cache.demand_accesses_0 966 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 962 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses_0 962 # number of demand (read+write) accesses
system.cpu.l2cache.demand_accesses_1 0 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 2.059561 # average overall miss latency
system.cpu.l2cache.demand_avg_miss_latency_0 2.059561 # average overall miss latency
@@ -611,15 +607,15 @@ system.cpu.l2cache.demand_avg_miss_latency_1 <err: div-0>
system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency_0 1 # average overall mshr miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
-system.cpu.l2cache.demand_hits 9 # number of demand (read+write) hits
-system.cpu.l2cache.demand_hits_0 9 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits 5 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits_0 5 # number of demand (read+write) hits
system.cpu.l2cache.demand_hits_1 0 # number of demand (read+write) hits
system.cpu.l2cache.demand_miss_latency 1971 # number of demand (read+write) miss cycles
system.cpu.l2cache.demand_miss_latency_0 1971 # number of demand (read+write) miss cycles
system.cpu.l2cache.demand_miss_latency_1 0 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.990683 # miss rate for demand accesses
-system.cpu.l2cache.demand_miss_rate_0 0.990683 # miss rate for demand accesses
-system.cpu.l2cache.demand_miss_rate_1 no value # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate 0.994802 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate_0 0.994802 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate_1 <err: div-0> # miss rate for demand accesses
system.cpu.l2cache.demand_misses 957 # number of demand (read+write) misses
system.cpu.l2cache.demand_misses_0 957 # number of demand (read+write) misses
system.cpu.l2cache.demand_misses_1 0 # number of demand (read+write) misses
@@ -629,9 +625,9 @@ system.cpu.l2cache.demand_mshr_hits_1 0 # nu
system.cpu.l2cache.demand_mshr_miss_latency 957 # number of demand (read+write) MSHR miss cycles
system.cpu.l2cache.demand_mshr_miss_latency_0 957 # number of demand (read+write) MSHR miss cycles
system.cpu.l2cache.demand_mshr_miss_latency_1 0 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.990683 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_miss_rate_0 0.990683 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_miss_rate_1 no value # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate 0.994802 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate_0 0.994802 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate_1 <err: div-0> # mshr miss rate for demand accesses
system.cpu.l2cache.demand_mshr_misses 957 # number of demand (read+write) MSHR misses
system.cpu.l2cache.demand_mshr_misses_0 957 # number of demand (read+write) MSHR misses
system.cpu.l2cache.demand_mshr_misses_1 0 # number of demand (read+write) MSHR misses
@@ -640,8 +636,8 @@ system.cpu.l2cache.mshr_cap_events 0 # nu
system.cpu.l2cache.mshr_cap_events_0 0 # number of times MSHR cap was activated
system.cpu.l2cache.mshr_cap_events_1 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 966 # number of overall (read+write) accesses
-system.cpu.l2cache.overall_accesses_0 966 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 962 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses_0 962 # number of overall (read+write) accesses
system.cpu.l2cache.overall_accesses_1 0 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2.059561 # average overall miss latency
system.cpu.l2cache.overall_avg_miss_latency_0 2.059561 # average overall miss latency
@@ -652,14 +648,14 @@ system.cpu.l2cache.overall_avg_mshr_miss_latency_1 <err: div-0>
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_0 <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_1 <err: div-0> # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_hits 9 # number of overall hits
-system.cpu.l2cache.overall_hits_0 9 # number of overall hits
+system.cpu.l2cache.overall_hits 5 # number of overall hits
+system.cpu.l2cache.overall_hits_0 5 # number of overall hits
system.cpu.l2cache.overall_hits_1 0 # number of overall hits
system.cpu.l2cache.overall_miss_latency 1971 # number of overall miss cycles
system.cpu.l2cache.overall_miss_latency_0 1971 # number of overall miss cycles
system.cpu.l2cache.overall_miss_latency_1 0 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.990683 # miss rate for overall accesses
-system.cpu.l2cache.overall_miss_rate_0 0.990683 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate 0.994802 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate_0 0.994802 # miss rate for overall accesses
system.cpu.l2cache.overall_miss_rate_1 <err: div-0> # miss rate for overall accesses
system.cpu.l2cache.overall_misses 957 # number of overall misses
system.cpu.l2cache.overall_misses_0 957 # number of overall misses
@@ -670,8 +666,8 @@ system.cpu.l2cache.overall_mshr_hits_1 0 # nu
system.cpu.l2cache.overall_mshr_miss_latency 957 # number of overall MSHR miss cycles
system.cpu.l2cache.overall_mshr_miss_latency_0 957 # number of overall MSHR miss cycles
system.cpu.l2cache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.990683 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_miss_rate_0 0.990683 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate 0.994802 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate_0 0.994802 # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_miss_rate_1 <err: div-0> # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_misses 957 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_misses_0 957 # number of overall MSHR misses
@@ -699,7 +695,7 @@ system.cpu.l2cache.soft_prefetch_mshr_full 0 #
system.cpu.l2cache.soft_prefetch_mshr_full_0 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.soft_prefetch_mshr_full_1 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.tagsinuse 558.911632 # Cycle average of tags in use
-system.cpu.l2cache.total_refs 9 # Total number of references to valid blocks.
+system.cpu.l2cache.total_refs 5 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.l2cache.writebacks_0 0 # number of writebacks
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr
index 890488cd2..48d711163 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr
@@ -2,3 +2,21 @@ warn: Entering event queue @ 0. Starting simulation...
warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000
warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
+warn: Default fetch doesn't update it's state from a functional call.
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout
index be25795fb..41cca6f14 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout
@@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:47 2006
+M5 compiled Oct 8 2006 20:54:51
+M5 started Sun Oct 8 20:55:24 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/01.hello-2T-smt/alpha/linux/o3-timing tests/run.py quick/01.hello-2T-smt/alpha/linux/o3-timing
Exiting @ tick 8441 because target called exit()
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini
index 3d719c501..401611d58 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini
@@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=AtomicSimpleCPU
children=dtb itb
clock=1
-cpu_id=-1
+cpu_id=0
defer_registration=false
dtb=system.cpu0.dtb
function_trace=false
@@ -106,7 +106,7 @@ size=48
type=AtomicSimpleCPU
children=dtb itb
clock=1
-cpu_id=-1
+cpu_id=1
defer_registration=false
dtb=system.cpu1.dtb
function_trace=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out
index b8290213e..1d4d50845 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out
@@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
itb=system.cpu0.itb
dtb=system.cpu0.dtb
-cpu_id=-1
profile=0
clock=1
defer_registration=false
@@ -118,9 +118,9 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=1
itb=system.cpu1.itb
dtb=system.cpu1.dtb
-cpu_id=-1
profile=0
clock=1
defer_registration=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console
index 4a397ddbf..27adebb82 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
- kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
+ kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 2 processor(s)
KSP: 0x20043FE8 PTBR 0x20
@@ -16,29 +16,27 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Bootstraping CPU 1 with sp=0xFFFFFC0000076000
unix_boot_mem ends at FFFFFC0000078000
k_argc = 0
- jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028)
- CallbackFixup 0 18000, t7=FFFFFC0000700000
+ jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
+ CallbackFixup 0 18000, t7=FFFFFC000070C000
Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400
- Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005
+ Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384
- freeing pages 1030:16384
- reserving pages 1030:1031
+ freeing pages 1069:16384
+ reserving pages 1069:1070
SMP: 2 CPUs probed -- cpu_present_mask = 3
Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0
- PID hash table entries: 1024 (order 10: 16384 bytes)
+ PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900
Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
- Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init)
- Mount-cache hash table entries: 512 (order: 0, 8192 bytes)
- per-CPU timeslice cutoff: 374.49 usecs.
- task migration cache decay timeout: 0 msecs.
+ Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init)
+ Mount-cache hash table entries: 512
SMP starting up secondaries.
Slave CPU 1 console command START
SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb FFFFFC0000018400 my_rpb_phys 18400
@@ -53,16 +51,21 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12
- Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled
+ Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
+ io scheduler noop registered
+ io scheduler anticipatory registered
+ io scheduler deadline registered
+ io scheduler cfq registered
loop: loaded (max 8 devices)
- Using anticipatory io scheduler
nbd: registered device at major 43
- sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver
- eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg
+ eth0: using 64 bit addressing.
+ eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
+ tun: Universal TUN/TAP device driver, 1.6
+ tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0
@@ -75,24 +78,23 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
+ hda: cache flushes not supported
hda: hda1
hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
+ hdb: cache flushes not supported
hdb: unknown partition table
- scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
- Vendor: Linux Model: scsi_m5 Li Rev: 0004
- Type: Direct-Access ANSI SCSI revision: 03
- SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
- SCSI device sda: drive cache: write back
- sda: unknown partition table
- Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2
- IP: routing cache hash table of 1024 buckets, 16Kbytes
- TCP: Hash tables configured (established 8192 bind 8192)
- ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack
+ IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
+ TCP established hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP: Hash tables configured (established 16384 bind 16384)
+ TCP reno registered
+ ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller
+ TCP bic registered
Initializing IPsec netlink socket
NET: Registered protocol family 1
NET: Registered protocol family 17
@@ -101,7 +103,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly.
- Freeing unused kernel memory: 480k freed
+ Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems...
loading script...
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt
index 376929ebb..e76c1d683 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt
@@ -1,232 +1,224 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1361363 # Simulator instruction rate (inst/s)
-host_mem_usage 194440 # Number of bytes of host memory used
-host_seconds 45.04 # Real time elapsed on the host
-host_tick_rate 78691874 # Simulator tick rate (ticks/s)
+host_inst_rate 1270607 # Simulator instruction rate (inst/s)
+host_mem_usage 197696 # Number of bytes of host memory used
+host_seconds 51.09 # Real time elapsed on the host
+host_tick_rate 72782461 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 61314617 # Number of instructions simulated
-sim_seconds 1.772124 # Number of seconds simulated
-sim_ticks 3544247159 # Number of ticks simulated
-system.cpu0.dtb.accesses 1850344 # DTB accesses
-system.cpu0.dtb.acv 301 # DTB access violations
-system.cpu0.dtb.hits 12691711 # DTB hits
-system.cpu0.dtb.misses 8349 # DTB misses
-system.cpu0.dtb.read_accesses 509385 # DTB read accesses
-system.cpu0.dtb.read_acv 184 # DTB read access violations
-system.cpu0.dtb.read_hits 7018751 # DTB read hits
-system.cpu0.dtb.read_misses 6579 # DTB read misses
-system.cpu0.dtb.write_accesses 1340959 # DTB write accesses
-system.cpu0.dtb.write_acv 117 # DTB write access violations
-system.cpu0.dtb.write_hits 5672960 # DTB write hits
-system.cpu0.dtb.write_misses 1770 # DTB write misses
-system.cpu0.idle_fraction 0.984893 # Percentage of idle cycles
-system.cpu0.itb.accesses 1981604 # ITB accesses
-system.cpu0.itb.acv 161 # ITB acv
-system.cpu0.itb.hits 1978255 # ITB hits
-system.cpu0.itb.misses 3349 # ITB misses
-system.cpu0.kern.callpal 176688 # number of callpals executed
+sim_insts 64909600 # Number of instructions simulated
+sim_seconds 1.859078 # Number of seconds simulated
+sim_ticks 3718155709 # Number of ticks simulated
+system.cpu0.dtb.accesses 544556 # DTB accesses
+system.cpu0.dtb.acv 335 # DTB access violations
+system.cpu0.dtb.hits 14841931 # DTB hits
+system.cpu0.dtb.misses 7356 # DTB misses
+system.cpu0.dtb.read_accesses 377530 # DTB read accesses
+system.cpu0.dtb.read_acv 210 # DTB read access violations
+system.cpu0.dtb.read_hits 8970576 # DTB read hits
+system.cpu0.dtb.read_misses 6581 # DTB read misses
+system.cpu0.dtb.write_accesses 167026 # DTB write accesses
+system.cpu0.dtb.write_acv 125 # DTB write access violations
+system.cpu0.dtb.write_hits 5871355 # DTB write hits
+system.cpu0.dtb.write_misses 775 # DTB write misses
+system.cpu0.idle_fraction 0.984943 # Percentage of idle cycles
+system.cpu0.itb.accesses 1436270 # ITB accesses
+system.cpu0.itb.acv 184 # ITB acv
+system.cpu0.itb.hits 1432801 # ITB hits
+system.cpu0.itb.misses 3469 # ITB misses
+system.cpu0.kern.callpal 182754 # number of callpals executed
system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu0.kern.callpal_wripir 97 0.05% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wripir 115 0.06% 0.06% # number of callpals executed
system.cpu0.kern.callpal_wrmces 1 0.00% 0.06% # number of callpals executed
system.cpu0.kern.callpal_wrfen 1 0.00% 0.06% # number of callpals executed
-system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.06% # number of callpals executed
-system.cpu0.kern.callpal_swpctx 1117 0.63% 0.69% # number of callpals executed
-system.cpu0.kern.callpal_tbi 44 0.02% 0.71% # number of callpals executed
-system.cpu0.kern.callpal_wrent 7 0.00% 0.72% # number of callpals executed
-system.cpu0.kern.callpal_swpipl 166811 94.41% 95.13% # number of callpals executed
-system.cpu0.kern.callpal_rdps 4911 2.78% 97.91% # number of callpals executed
-system.cpu0.kern.callpal_wrkgp 1 0.00% 97.91% # number of callpals executed
-system.cpu0.kern.callpal_wrusp 3 0.00% 97.91% # number of callpals executed
-system.cpu0.kern.callpal_rdusp 9 0.01% 97.91% # number of callpals executed
-system.cpu0.kern.callpal_whami 2 0.00% 97.92% # number of callpals executed
-system.cpu0.kern.callpal_rti 3236 1.83% 99.75% # number of callpals executed
-system.cpu0.kern.callpal_callsys 325 0.18% 99.93% # number of callpals executed
-system.cpu0.kern.callpal_imb 121 0.07% 100.00% # number of callpals executed
+system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.07% # number of callpals executed
+system.cpu0.kern.callpal_swpctx 3791 2.07% 2.14% # number of callpals executed
+system.cpu0.kern.callpal_tbi 49 0.03% 2.17% # number of callpals executed
+system.cpu0.kern.callpal_wrent 7 0.00% 2.17% # number of callpals executed
+system.cpu0.kern.callpal_swpipl 167832 91.83% 94.01% # number of callpals executed
+system.cpu0.kern.callpal_rdps 5780 3.16% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_wrkgp 1 0.00% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_wrusp 2 0.00% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_rdusp 9 0.00% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_whami 2 0.00% 97.18% # number of callpals executed
+system.cpu0.kern.callpal_rti 4696 2.57% 99.75% # number of callpals executed
+system.cpu0.kern.callpal_callsys 344 0.19% 99.93% # number of callpals executed
+system.cpu0.kern.callpal_imb 122 0.07% 100.00% # number of callpals executed
system.cpu0.kern.inst.arm 0 # number of arm instructions executed
-system.cpu0.kern.inst.hwrei 190918 # number of hwrei instructions executed
+system.cpu0.kern.inst.hwrei 196249 # number of hwrei instructions executed
system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu0.kern.inst.quiesce 1922 # number of quiesce instructions executed
-system.cpu0.kern.ipl_count 172116 # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_0 72060 41.87% 41.87% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_21 251 0.15% 42.01% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_22 5518 3.21% 45.22% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_30 7 0.00% 45.22% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_31 94280 54.78% 100.00% # number of times we switched to this ipl
-system.cpu0.kern.ipl_good 153515 # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_0 72019 46.91% 46.91% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_21 251 0.16% 47.08% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_22 5518 3.59% 50.67% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_30 7 0.00% 50.68% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_31 75720 49.32% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_ticks 3543835079 # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_0 3521923327 99.38% 99.38% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_21 39982 0.00% 99.38% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_22 1005040 0.03% 99.41% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_30 1756 0.00% 99.41% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_31 20864974 0.59% 100.00% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_used 0.891928 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_0 0.999431 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.inst.quiesce 6184 # number of quiesce instructions executed
+system.cpu0.kern.ipl_count 174678 # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_0 70736 40.50% 40.50% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_21 245 0.14% 40.64% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_22 1896 1.09% 41.72% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_30 8 0.00% 41.73% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_31 101793 58.27% 100.00% # number of times we switched to this ipl
+system.cpu0.kern.ipl_good 140889 # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_0 69374 49.24% 49.24% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_21 245 0.17% 49.41% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_22 1896 1.35% 50.76% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_30 8 0.01% 50.77% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_31 69366 49.23% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_ticks 3718155294 # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_0 3683661066 99.07% 99.07% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_21 40474 0.00% 99.07% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_22 163056 0.00% 99.08% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_30 2026 0.00% 99.08% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_31 34288672 0.92% 100.00% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_used 0.806564 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_0 0.980745 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_31 0.803140 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.mode_good_kernel 1277
-system.cpu0.kern.mode_good_user 1129
-system.cpu0.kern.mode_good_idle 148
-system.cpu0.kern.mode_switch_kernel 2253 # number of protection mode switches
-system.cpu0.kern.mode_switch_user 1129 # number of protection mode switches
-system.cpu0.kern.mode_switch_idle 2074 # number of protection mode switches
-system.cpu0.kern.mode_switch_good 0.468109 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_kernel 0.566800 # fraction of useful protection mode switches
+system.cpu0.kern.ipl_used_31 0.681442 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.mode_good_kernel 1192
+system.cpu0.kern.mode_good_user 1193
+system.cpu0.kern.mode_good_idle 0
+system.cpu0.kern.mode_switch_kernel 7143 # number of protection mode switches
+system.cpu0.kern.mode_switch_user 1193 # number of protection mode switches
+system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
+system.cpu0.kern.mode_switch_good 0.286108 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_kernel 0.166877 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_idle 0.071360 # fraction of useful protection mode switches
-system.cpu0.kern.mode_ticks_kernel 28710240 0.81% 0.81% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_user 2184201 0.06% 0.87% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_idle 3512891779 99.13% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.swap_context 1118 # number of times the context was actually changed
-system.cpu0.kern.syscall 192 # number of syscalls executed
-system.cpu0.kern.syscall_fork 7 3.65% 3.65% # number of syscalls executed
-system.cpu0.kern.syscall_read 13 6.77% 10.42% # number of syscalls executed
-system.cpu0.kern.syscall_write 4 2.08% 12.50% # number of syscalls executed
-system.cpu0.kern.syscall_close 28 14.58% 27.08% # number of syscalls executed
-system.cpu0.kern.syscall_chdir 1 0.52% 27.60% # number of syscalls executed
-system.cpu0.kern.syscall_obreak 7 3.65% 31.25% # number of syscalls executed
-system.cpu0.kern.syscall_lseek 6 3.12% 34.37% # number of syscalls executed
-system.cpu0.kern.syscall_getpid 4 2.08% 36.46% # number of syscalls executed
-system.cpu0.kern.syscall_setuid 1 0.52% 36.98% # number of syscalls executed
-system.cpu0.kern.syscall_getuid 3 1.56% 38.54% # number of syscalls executed
-system.cpu0.kern.syscall_access 7 3.65% 42.19% # number of syscalls executed
-system.cpu0.kern.syscall_dup 2 1.04% 43.23% # number of syscalls executed
-system.cpu0.kern.syscall_open 34 17.71% 60.94% # number of syscalls executed
-system.cpu0.kern.syscall_getgid 3 1.56% 62.50% # number of syscalls executed
-system.cpu0.kern.syscall_sigprocmask 8 4.17% 66.67% # number of syscalls executed
-system.cpu0.kern.syscall_ioctl 9 4.69% 71.35% # number of syscalls executed
-system.cpu0.kern.syscall_readlink 1 0.52% 71.87% # number of syscalls executed
-system.cpu0.kern.syscall_execve 5 2.60% 74.48% # number of syscalls executed
-system.cpu0.kern.syscall_mmap 22 11.46% 85.94% # number of syscalls executed
-system.cpu0.kern.syscall_munmap 2 1.04% 86.98% # number of syscalls executed
-system.cpu0.kern.syscall_mprotect 6 3.12% 90.10% # number of syscalls executed
-system.cpu0.kern.syscall_gethostname 1 0.52% 90.62% # number of syscalls executed
-system.cpu0.kern.syscall_dup2 2 1.04% 91.67% # number of syscalls executed
-system.cpu0.kern.syscall_fcntl 8 4.17% 95.83% # number of syscalls executed
-system.cpu0.kern.syscall_socket 2 1.04% 96.87% # number of syscalls executed
-system.cpu0.kern.syscall_connect 2 1.04% 97.92% # number of syscalls executed
-system.cpu0.kern.syscall_setgid 1 0.52% 98.44% # number of syscalls executed
-system.cpu0.kern.syscall_getrlimit 1 0.52% 98.96% # number of syscalls executed
-system.cpu0.kern.syscall_setsid 2 1.04% 100.00% # number of syscalls executed
-system.cpu0.not_idle_fraction 0.015107 # Percentage of non-idle cycles
-system.cpu0.numCycles 53543489 # number of cpu cycles simulated
-system.cpu0.num_insts 53539979 # Number of instructions executed
-system.cpu0.num_refs 12727196 # Number of memory references
-system.cpu1.dtb.accesses 460215 # DTB accesses
-system.cpu1.dtb.acv 72 # DTB access violations
-system.cpu1.dtb.hits 2012555 # DTB hits
-system.cpu1.dtb.misses 4236 # DTB misses
-system.cpu1.dtb.read_accesses 319867 # DTB read accesses
-system.cpu1.dtb.read_acv 26 # DTB read access violations
-system.cpu1.dtb.read_hits 1276251 # DTB read hits
-system.cpu1.dtb.read_misses 3800 # DTB read misses
-system.cpu1.dtb.write_accesses 140348 # DTB write accesses
-system.cpu1.dtb.write_acv 46 # DTB write access violations
-system.cpu1.dtb.write_hits 736304 # DTB write hits
-system.cpu1.dtb.write_misses 436 # DTB write misses
-system.cpu1.idle_fraction 0.997806 # Percentage of idle cycles
-system.cpu1.itb.accesses 1302484 # ITB accesses
-system.cpu1.itb.acv 23 # ITB acv
-system.cpu1.itb.hits 1300768 # ITB hits
-system.cpu1.itb.misses 1716 # ITB misses
-system.cpu1.kern.callpal 27118 # number of callpals executed
+system.cpu0.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
+system.cpu0.kern.mode_ticks_kernel 3716512331 99.96% 99.96% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_user 1642961 0.04% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.swap_context 3792 # number of times the context was actually changed
+system.cpu0.kern.syscall 199 # number of syscalls executed
+system.cpu0.kern.syscall_fork 8 4.02% 4.02% # number of syscalls executed
+system.cpu0.kern.syscall_read 17 8.54% 12.56% # number of syscalls executed
+system.cpu0.kern.syscall_write 4 2.01% 14.57% # number of syscalls executed
+system.cpu0.kern.syscall_close 29 14.57% 29.15% # number of syscalls executed
+system.cpu0.kern.syscall_chdir 1 0.50% 29.65% # number of syscalls executed
+system.cpu0.kern.syscall_obreak 4 2.01% 31.66% # number of syscalls executed
+system.cpu0.kern.syscall_lseek 10 5.03% 36.68% # number of syscalls executed
+system.cpu0.kern.syscall_getpid 6 3.02% 39.70% # number of syscalls executed
+system.cpu0.kern.syscall_setuid 1 0.50% 40.20% # number of syscalls executed
+system.cpu0.kern.syscall_getuid 3 1.51% 41.71% # number of syscalls executed
+system.cpu0.kern.syscall_access 6 3.02% 44.72% # number of syscalls executed
+system.cpu0.kern.syscall_dup 2 1.01% 45.73% # number of syscalls executed
+system.cpu0.kern.syscall_open 31 15.58% 61.31% # number of syscalls executed
+system.cpu0.kern.syscall_getgid 3 1.51% 62.81% # number of syscalls executed
+system.cpu0.kern.syscall_sigprocmask 10 5.03% 67.84% # number of syscalls executed
+system.cpu0.kern.syscall_ioctl 9 4.52% 72.36% # number of syscalls executed
+system.cpu0.kern.syscall_execve 6 3.02% 75.38% # number of syscalls executed
+system.cpu0.kern.syscall_mmap 20 10.05% 85.43% # number of syscalls executed
+system.cpu0.kern.syscall_munmap 3 1.51% 86.93% # number of syscalls executed
+system.cpu0.kern.syscall_mprotect 5 2.51% 89.45% # number of syscalls executed
+system.cpu0.kern.syscall_gethostname 1 0.50% 89.95% # number of syscalls executed
+system.cpu0.kern.syscall_dup2 3 1.51% 91.46% # number of syscalls executed
+system.cpu0.kern.syscall_fcntl 8 4.02% 95.48% # number of syscalls executed
+system.cpu0.kern.syscall_socket 2 1.01% 96.48% # number of syscalls executed
+system.cpu0.kern.syscall_connect 2 1.01% 97.49% # number of syscalls executed
+system.cpu0.kern.syscall_setgid 1 0.50% 97.99% # number of syscalls executed
+system.cpu0.kern.syscall_getrlimit 2 1.01% 98.99% # number of syscalls executed
+system.cpu0.kern.syscall_setsid 2 1.01% 100.00% # number of syscalls executed
+system.cpu0.not_idle_fraction 0.015057 # Percentage of non-idle cycles
+system.cpu0.numCycles 55984201 # number of cpu cycles simulated
+system.cpu0.num_insts 55980548 # Number of instructions executed
+system.cpu0.num_refs 15081320 # Number of memory references
+system.cpu1.dtb.accesses 761000 # DTB accesses
+system.cpu1.dtb.acv 32 # DTB access violations
+system.cpu1.dtb.hits 2653187 # DTB hits
+system.cpu1.dtb.misses 4173 # DTB misses
+system.cpu1.dtb.read_accesses 523552 # DTB read accesses
+system.cpu1.dtb.read_acv 0 # DTB read access violations
+system.cpu1.dtb.read_hits 1675663 # DTB read hits
+system.cpu1.dtb.read_misses 3798 # DTB read misses
+system.cpu1.dtb.write_accesses 237448 # DTB write accesses
+system.cpu1.dtb.write_acv 32 # DTB write access violations
+system.cpu1.dtb.write_hits 977524 # DTB write hits
+system.cpu1.dtb.write_misses 375 # DTB write misses
+system.cpu1.idle_fraction 0.997598 # Percentage of idle cycles
+system.cpu1.itb.accesses 1845187 # ITB accesses
+system.cpu1.itb.acv 0 # ITB acv
+system.cpu1.itb.hits 1843600 # ITB hits
+system.cpu1.itb.misses 1587 # ITB misses
+system.cpu1.kern.callpal 34405 # number of callpals executed
system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu1.kern.callpal_wripir 7 0.03% 0.03% # number of callpals executed
+system.cpu1.kern.callpal_wripir 8 0.02% 0.03% # number of callpals executed
system.cpu1.kern.callpal_wrmces 1 0.00% 0.03% # number of callpals executed
-system.cpu1.kern.callpal_wrfen 1 0.00% 0.04% # number of callpals executed
-system.cpu1.kern.callpal_swpctx 515 1.90% 1.94% # number of callpals executed
-system.cpu1.kern.callpal_tbi 10 0.04% 1.97% # number of callpals executed
-system.cpu1.kern.callpal_wrent 7 0.03% 2.00% # number of callpals executed
-system.cpu1.kern.callpal_swpipl 23496 86.64% 88.64% # number of callpals executed
-system.cpu1.kern.callpal_rdps 251 0.93% 89.57% # number of callpals executed
-system.cpu1.kern.callpal_wrkgp 1 0.00% 89.57% # number of callpals executed
-system.cpu1.kern.callpal_wrusp 4 0.01% 89.59% # number of callpals executed
-system.cpu1.kern.callpal_rdusp 1 0.00% 89.59% # number of callpals executed
-system.cpu1.kern.callpal_whami 3 0.01% 89.60% # number of callpals executed
-system.cpu1.kern.callpal_rti 2552 9.41% 99.01% # number of callpals executed
-system.cpu1.kern.callpal_callsys 208 0.77% 99.78% # number of callpals executed
-system.cpu1.kern.callpal_imb 59 0.22% 100.00% # number of callpals executed
+system.cpu1.kern.callpal_wrfen 1 0.00% 0.03% # number of callpals executed
+system.cpu1.kern.callpal_swpctx 468 1.36% 1.39% # number of callpals executed
+system.cpu1.kern.callpal_tbi 5 0.01% 1.41% # number of callpals executed
+system.cpu1.kern.callpal_wrent 7 0.02% 1.43% # number of callpals executed
+system.cpu1.kern.callpal_swpipl 28030 81.47% 82.90% # number of callpals executed
+system.cpu1.kern.callpal_rdps 3042 8.84% 91.74% # number of callpals executed
+system.cpu1.kern.callpal_wrkgp 1 0.00% 91.74% # number of callpals executed
+system.cpu1.kern.callpal_wrusp 5 0.01% 91.76% # number of callpals executed
+system.cpu1.kern.callpal_whami 3 0.01% 91.77% # number of callpals executed
+system.cpu1.kern.callpal_rti 2586 7.52% 99.28% # number of callpals executed
+system.cpu1.kern.callpal_callsys 187 0.54% 99.83% # number of callpals executed
+system.cpu1.kern.callpal_imb 59 0.17% 100.00% # number of callpals executed
system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
system.cpu1.kern.inst.arm 0 # number of arm instructions executed
-system.cpu1.kern.inst.hwrei 35069 # number of hwrei instructions executed
+system.cpu1.kern.inst.hwrei 42209 # number of hwrei instructions executed
system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu1.kern.inst.quiesce 1947 # number of quiesce instructions executed
-system.cpu1.kern.ipl_count 27951 # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_0 10084 36.08% 36.08% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_22 5485 19.62% 55.70% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_30 97 0.35% 56.05% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_31 12285 43.95% 100.00% # number of times we switched to this ipl
-system.cpu1.kern.ipl_good 27484 # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_0 10061 36.61% 36.61% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_22 5485 19.96% 56.56% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_30 97 0.35% 56.92% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_31 11841 43.08% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_ticks 3544246744 # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_0 3521927913 99.37% 99.37% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_22 1037048 0.03% 99.40% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_30 25211 0.00% 99.40% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_31 21256572 0.60% 100.00% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_used 0.983292 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_0 0.997719 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.inst.quiesce 2146 # number of quiesce instructions executed
+system.cpu1.kern.ipl_count 32627 # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_0 11165 34.22% 34.22% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_22 1895 5.81% 40.03% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_30 115 0.35% 40.38% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_31 19452 59.62% 100.00% # number of times we switched to this ipl
+system.cpu1.kern.ipl_good 24195 # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_0 11150 46.08% 46.08% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_22 1895 7.83% 53.92% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_30 115 0.48% 54.39% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_31 11035 45.61% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_ticks 3717733449 # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_0 3695802393 99.41% 99.41% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_22 162970 0.00% 99.41% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_30 29122 0.00% 99.42% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_31 21738964 0.58% 100.00% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_used 0.741564 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.ipl_used_0 0.998657 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_31 0.963858 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.mode_good_kernel 636
-system.cpu1.kern.mode_good_user 637
-system.cpu1.kern.mode_good_idle 0
-system.cpu1.kern.mode_switch_kernel 3063 # number of protection mode switches
-system.cpu1.kern.mode_switch_user 637 # number of protection mode switches
-system.cpu1.kern.mode_switch_idle 0 # number of protection mode switches
-system.cpu1.kern.mode_switch_good 0.344054 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_kernel 0.207640 # fraction of useful protection mode switches
+system.cpu1.kern.ipl_used_31 0.567294 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.mode_good_kernel 602
+system.cpu1.kern.mode_good_user 563
+system.cpu1.kern.mode_good_idle 39
+system.cpu1.kern.mode_switch_kernel 1011 # number of protection mode switches
+system.cpu1.kern.mode_switch_user 563 # number of protection mode switches
+system.cpu1.kern.mode_switch_idle 2045 # number of protection mode switches
+system.cpu1.kern.mode_switch_good 0.332689 # fraction of useful protection mode switches
+system.cpu1.kern.mode_switch_good_kernel 0.595450 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
-system.cpu1.kern.mode_ticks_kernel 3542834137 99.96% 99.96% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_user 1412605 0.04% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.swap_context 516 # number of times the context was actually changed
-system.cpu1.kern.syscall 137 # number of syscalls executed
-system.cpu1.kern.syscall_fork 1 0.73% 0.73% # number of syscalls executed
-system.cpu1.kern.syscall_read 17 12.41% 13.14% # number of syscalls executed
-system.cpu1.kern.syscall_close 15 10.95% 24.09% # number of syscalls executed
-system.cpu1.kern.syscall_chmod 1 0.73% 24.82% # number of syscalls executed
-system.cpu1.kern.syscall_obreak 8 5.84% 30.66% # number of syscalls executed
-system.cpu1.kern.syscall_lseek 4 2.92% 33.58% # number of syscalls executed
-system.cpu1.kern.syscall_getpid 2 1.46% 35.04% # number of syscalls executed
-system.cpu1.kern.syscall_setuid 3 2.19% 37.23% # number of syscalls executed
-system.cpu1.kern.syscall_getuid 3 2.19% 39.42% # number of syscalls executed
-system.cpu1.kern.syscall_access 4 2.92% 42.34% # number of syscalls executed
-system.cpu1.kern.syscall_open 21 15.33% 57.66% # number of syscalls executed
-system.cpu1.kern.syscall_getgid 3 2.19% 59.85% # number of syscalls executed
-system.cpu1.kern.syscall_sigprocmask 2 1.46% 61.31% # number of syscalls executed
-system.cpu1.kern.syscall_ioctl 1 0.73% 62.04% # number of syscalls executed
-system.cpu1.kern.syscall_execve 2 1.46% 63.50% # number of syscalls executed
-system.cpu1.kern.syscall_mmap 32 23.36% 86.86% # number of syscalls executed
-system.cpu1.kern.syscall_munmap 1 0.73% 87.59% # number of syscalls executed
-system.cpu1.kern.syscall_mprotect 10 7.30% 94.89% # number of syscalls executed
-system.cpu1.kern.syscall_dup2 1 0.73% 95.62% # number of syscalls executed
-system.cpu1.kern.syscall_fcntl 2 1.46% 97.08% # number of syscalls executed
-system.cpu1.kern.syscall_setgid 3 2.19% 99.27% # number of syscalls executed
-system.cpu1.kern.syscall_getrlimit 1 0.73% 100.00% # number of syscalls executed
-system.cpu1.not_idle_fraction 0.002194 # Percentage of non-idle cycles
-system.cpu1.numCycles 7776377 # number of cpu cycles simulated
-system.cpu1.num_insts 7774638 # Number of instructions executed
-system.cpu1.num_refs 2025195 # Number of memory references
+system.cpu1.kern.mode_switch_good_idle 0.019071 # fraction of useful protection mode switches
+system.cpu1.kern.mode_ticks_kernel 4713507 0.13% 0.13% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_user 1950903 0.05% 0.18% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_idle 3710606044 99.82% 100.00% # number of ticks spent at the given mode
+system.cpu1.kern.swap_context 469 # number of times the context was actually changed
+system.cpu1.kern.syscall 130 # number of syscalls executed
+system.cpu1.kern.syscall_read 13 10.00% 10.00% # number of syscalls executed
+system.cpu1.kern.syscall_close 14 10.77% 20.77% # number of syscalls executed
+system.cpu1.kern.syscall_chmod 1 0.77% 21.54% # number of syscalls executed
+system.cpu1.kern.syscall_obreak 11 8.46% 30.00% # number of syscalls executed
+system.cpu1.kern.syscall_setuid 3 2.31% 32.31% # number of syscalls executed
+system.cpu1.kern.syscall_getuid 3 2.31% 34.62% # number of syscalls executed
+system.cpu1.kern.syscall_access 5 3.85% 38.46% # number of syscalls executed
+system.cpu1.kern.syscall_open 24 18.46% 56.92% # number of syscalls executed
+system.cpu1.kern.syscall_getgid 3 2.31% 59.23% # number of syscalls executed
+system.cpu1.kern.syscall_ioctl 1 0.77% 60.00% # number of syscalls executed
+system.cpu1.kern.syscall_readlink 1 0.77% 60.77% # number of syscalls executed
+system.cpu1.kern.syscall_execve 1 0.77% 61.54% # number of syscalls executed
+system.cpu1.kern.syscall_mmap 34 26.15% 87.69% # number of syscalls executed
+system.cpu1.kern.syscall_mprotect 11 8.46% 96.15% # number of syscalls executed
+system.cpu1.kern.syscall_fcntl 2 1.54% 97.69% # number of syscalls executed
+system.cpu1.kern.syscall_setgid 3 2.31% 100.00% # number of syscalls executed
+system.cpu1.not_idle_fraction 0.002402 # Percentage of non-idle cycles
+system.cpu1.numCycles 8930639 # number of cpu cycles simulated
+system.cpu1.num_insts 8929052 # Number of instructions executed
+system.cpu1.num_refs 2665347 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
-system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes.
-system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes.
-system.disk0.dma_write_txs 412 # Number of DMA write transactions.
+system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
+system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
+system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
index fe3ad68ab..14aa2c9ff 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
@@ -3,4 +3,4 @@ Listening for console connection on port 3456
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001
warn: Entering event queue @ 0. Starting simulation...
-warn: 195722: Trying to launch CPU number 1!
+warn: 195723: Trying to launch CPU number 1!
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
index 039088577..18365db1c 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 5 2006 22:13:02
-M5 started Fri Oct 6 00:24:12 2006
-M5 executing on zizzer.eecs.umich.edu
+M5 compiled Oct 8 2006 21:57:24
+M5 started Sun Oct 8 21:58:13 2006
+M5 executing on zed.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual
-Exiting @ tick 3544247159 because m5_exit instruction encountered
+Exiting @ tick 3718155709 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini
index e30428078..bdd7566bc 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini
@@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=AtomicSimpleCPU
children=dtb itb
clock=1
-cpu_id=-1
+cpu_id=0
defer_registration=false
dtb=system.cpu.dtb
function_trace=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out
index ea63dce8b..bc2f45a5e 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out
@@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
itb=system.cpu.itb
dtb=system.cpu.dtb
-cpu_id=-1
profile=0
clock=1
defer_registration=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console
index d6e3955cc..5461cc4ab 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
- kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
+ kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 1 processor(s)
KSP: 0x20043FE8 PTBR 0x20
@@ -14,28 +14,26 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ConsoleDispatch at virt 10000658 phys 18658 val FFFFFC00000100A8
unix_boot_mem ends at FFFFFC0000076000
k_argc = 0
- jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028)
- CallbackFixup 0 18000, t7=FFFFFC0000700000
- Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005
+ jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
+ CallbackFixup 0 18000, t7=FFFFFC000070C000
+ Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384
- freeing pages 1030:16384
- reserving pages 1030:1031
+ freeing pages 1069:16384
+ reserving pages 1069:1070
SMP: 1 CPUs probed -- cpu_present_mask = 1
Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0
- PID hash table entries: 1024 (order 10: 16384 bytes)
+ PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900
Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
- Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init)
- Mount-cache hash table entries: 512 (order: 0, 8192 bytes)
- per-CPU timeslice cutoff: 374.49 usecs.
- task migration cache decay timeout: 0 msecs.
+ Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init)
+ Mount-cache hash table entries: 512
SMP mode deactivated.
Brought up 1 CPUs
SMP: Total of 1 processors activated (4002.20 BogoMIPS).
@@ -48,16 +46,21 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12
- Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled
+ Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
+ io scheduler noop registered
+ io scheduler anticipatory registered
+ io scheduler deadline registered
+ io scheduler cfq registered
loop: loaded (max 8 devices)
- Using anticipatory io scheduler
nbd: registered device at major 43
- sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver
- eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg
+ eth0: using 64 bit addressing.
+ eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
+ tun: Universal TUN/TAP device driver, 1.6
+ tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0
@@ -70,24 +73,23 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
+ hda: cache flushes not supported
hda: hda1
hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
+ hdb: cache flushes not supported
hdb: unknown partition table
- scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
- Vendor: Linux Model: scsi_m5 Li Rev: 0004
- Type: Direct-Access ANSI SCSI revision: 03
- SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
- SCSI device sda: drive cache: write back
- sda: unknown partition table
- Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2
- IP: routing cache hash table of 1024 buckets, 16Kbytes
- TCP: Hash tables configured (established 8192 bind 8192)
- ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack
+ IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
+ TCP established hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP: Hash tables configured (established 16384 bind 16384)
+ TCP reno registered
+ ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller
+ TCP bic registered
Initializing IPsec netlink socket
NET: Registered protocol family 1
NET: Registered protocol family 17
@@ -96,7 +98,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly.
- Freeing unused kernel memory: 480k freed
+ Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems...
loading script...
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
index 5c403c0a9..e276e91a7 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
@@ -1,86 +1,86 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1121378 # Simulator instruction rate (inst/s)
-host_mem_usage 194272 # Number of bytes of host memory used
-host_seconds 51.72 # Real time elapsed on the host
-host_tick_rate 67313414 # Simulator tick rate (ticks/s)
+host_inst_rate 1389289 # Simulator instruction rate (inst/s)
+host_mem_usage 197652 # Number of bytes of host memory used
+host_seconds 44.48 # Real time elapsed on the host
+host_tick_rate 81712411 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 58001813 # Number of instructions simulated
-sim_seconds 1.740863 # Number of seconds simulated
-sim_ticks 3481726167 # Number of ticks simulated
-system.cpu.dtb.accesses 2309470 # DTB accesses
+sim_insts 61788439 # Number of instructions simulated
+sim_seconds 1.817090 # Number of seconds simulated
+sim_ticks 3634179176 # Number of ticks simulated
+system.cpu.dtb.accesses 1304494 # DTB accesses
system.cpu.dtb.acv 367 # DTB access violations
-system.cpu.dtb.hits 13711941 # DTB hits
-system.cpu.dtb.misses 12493 # DTB misses
-system.cpu.dtb.read_accesses 828530 # DTB read accesses
+system.cpu.dtb.hits 16552094 # DTB hits
+system.cpu.dtb.misses 11425 # DTB misses
+system.cpu.dtb.read_accesses 900425 # DTB read accesses
system.cpu.dtb.read_acv 210 # DTB read access violations
-system.cpu.dtb.read_hits 7597829 # DTB read hits
-system.cpu.dtb.read_misses 10298 # DTB read misses
-system.cpu.dtb.write_accesses 1480940 # DTB write accesses
+system.cpu.dtb.read_hits 10038384 # DTB read hits
+system.cpu.dtb.read_misses 10280 # DTB read misses
+system.cpu.dtb.write_accesses 404069 # DTB write accesses
system.cpu.dtb.write_acv 157 # DTB write access violations
-system.cpu.dtb.write_hits 6114112 # DTB write hits
-system.cpu.dtb.write_misses 2195 # DTB write misses
-system.cpu.idle_fraction 0.983340 # Percentage of idle cycles
-system.cpu.itb.accesses 3281346 # ITB accesses
+system.cpu.dtb.write_hits 6513710 # DTB write hits
+system.cpu.dtb.write_misses 1145 # DTB write misses
+system.cpu.idle_fraction 0.982997 # Percentage of idle cycles
+system.cpu.itb.accesses 3281310 # ITB accesses
system.cpu.itb.acv 184 # ITB acv
-system.cpu.itb.hits 3276356 # ITB hits
+system.cpu.itb.hits 3276320 # ITB hits
system.cpu.itb.misses 4990 # ITB misses
-system.cpu.kern.callpal 182718 # number of callpals executed
+system.cpu.kern.callpal 193842 # number of callpals executed
system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
-system.cpu.kern.callpal_swpctx 1574 0.86% 0.86% # number of callpals executed
-system.cpu.kern.callpal_tbi 54 0.03% 0.89% # number of callpals executed
-system.cpu.kern.callpal_wrent 7 0.00% 0.90% # number of callpals executed
-system.cpu.kern.callpal_swpipl 171359 93.78% 94.68% # number of callpals executed
-system.cpu.kern.callpal_rdps 5159 2.82% 97.50% # number of callpals executed
-system.cpu.kern.callpal_wrkgp 1 0.00% 97.50% # number of callpals executed
-system.cpu.kern.callpal_wrusp 7 0.00% 97.51% # number of callpals executed
-system.cpu.kern.callpal_rdusp 10 0.01% 97.51% # number of callpals executed
-system.cpu.kern.callpal_whami 2 0.00% 97.51% # number of callpals executed
-system.cpu.kern.callpal_rti 3829 2.10% 99.61% # number of callpals executed
-system.cpu.kern.callpal_callsys 531 0.29% 99.90% # number of callpals executed
-system.cpu.kern.callpal_imb 181 0.10% 100.00% # number of callpals executed
+system.cpu.kern.callpal_swpctx 4203 2.17% 2.17% # number of callpals executed
+system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed
+system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed
+system.cpu.kern.callpal_swpipl 176751 91.18% 93.38% # number of callpals executed
+system.cpu.kern.callpal_rdps 6881 3.55% 96.93% # number of callpals executed
+system.cpu.kern.callpal_wrkgp 1 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_rti 5211 2.69% 99.63% # number of callpals executed
+system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed
+system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
system.cpu.kern.inst.arm 0 # number of arm instructions executed
-system.cpu.kern.inst.hwrei 202783 # number of hwrei instructions executed
+system.cpu.kern.inst.hwrei 212908 # number of hwrei instructions executed
system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu.kern.inst.quiesce 1877 # number of quiesce instructions executed
-system.cpu.kern.ipl_count 177218 # number of times we switched to this ipl
-system.cpu.kern.ipl_count_0 74624 42.11% 42.11% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_21 251 0.14% 42.25% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_22 5425 3.06% 45.31% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_31 96918 54.69% 100.00% # number of times we switched to this ipl
-system.cpu.kern.ipl_good 158463 # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_0 74570 47.06% 47.06% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_21 251 0.16% 47.22% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_22 5425 3.42% 50.64% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_31 78217 49.36% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_ticks 3481725752 # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_0 3459659082 99.37% 99.37% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_21 39982 0.00% 99.37% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_22 930159 0.03% 99.39% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_31 21096529 0.61% 100.00% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_used 0.894170 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_0 0.999276 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.inst.quiesce 6207 # number of quiesce instructions executed
+system.cpu.kern.ipl_count 184061 # number of times we switched to this ipl
+system.cpu.kern.ipl_count_0 75348 40.94% 40.94% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_21 245 0.13% 41.07% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_22 1853 1.01% 42.08% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_31 106615 57.92% 100.00% # number of times we switched to this ipl
+system.cpu.kern.ipl_good 150060 # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_0 73981 49.30% 49.30% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_22 1853 1.23% 50.70% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_31 73981 49.30% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_ticks 3634178761 # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_0 3599646819 99.05% 99.05% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_21 40474 0.00% 99.05% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_22 159358 0.00% 99.06% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_31 34332110 0.94% 100.00% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_used 0.815273 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.ipl_used_0 0.981858 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_31 0.807043 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.mode_good_kernel 1939
-system.cpu.kern.mode_good_user 1757
-system.cpu.kern.mode_good_idle 182
-system.cpu.kern.mode_switch_kernel 3320 # number of protection mode switches
-system.cpu.kern.mode_switch_user 1757 # number of protection mode switches
-system.cpu.kern.mode_switch_idle 2061 # number of protection mode switches
-system.cpu.kern.mode_switch_good 0.543289 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_kernel 0.584036 # fraction of useful protection mode switches
+system.cpu.kern.ipl_used_31 0.693908 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.mode_good_kernel 1938
+system.cpu.kern.mode_good_user 1758
+system.cpu.kern.mode_good_idle 180
+system.cpu.kern.mode_switch_kernel 5978 # number of protection mode switches
+system.cpu.kern.mode_switch_user 1758 # number of protection mode switches
+system.cpu.kern.mode_switch_idle 2102 # number of protection mode switches
+system.cpu.kern.mode_switch_good 0.393983 # fraction of useful protection mode switches
+system.cpu.kern.mode_switch_good_kernel 0.324189 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_idle 0.088307 # fraction of useful protection mode switches
-system.cpu.kern.mode_ticks_kernel 31887159 0.92% 0.92% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_user 3591270 0.10% 1.02% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_idle 3446247321 98.98% 100.00% # number of ticks spent at the given mode
-system.cpu.kern.swap_context 1575 # number of times the context was actually changed
+system.cpu.kern.mode_switch_good_idle 0.085633 # fraction of useful protection mode switches
+system.cpu.kern.mode_ticks_kernel 54682435 1.50% 1.50% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_user 3591244 0.10% 1.60% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_idle 3575905080 98.40% 100.00% # number of ticks spent at the given mode
+system.cpu.kern.swap_context 4204 # number of times the context was actually changed
system.cpu.kern.syscall 329 # number of syscalls executed
system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed
system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed
@@ -112,16 +112,16 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu
system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed
system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed
system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed
-system.cpu.not_idle_fraction 0.016660 # Percentage of non-idle cycles
-system.cpu.numCycles 58006987 # number of cpu cycles simulated
-system.cpu.num_insts 58001813 # Number of instructions executed
-system.cpu.num_refs 13757191 # Number of memory references
+system.cpu.not_idle_fraction 0.017003 # Percentage of non-idle cycles
+system.cpu.numCycles 61793613 # number of cpu cycles simulated
+system.cpu.num_insts 61788439 # Number of instructions executed
+system.cpu.num_refs 16800623 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
-system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes.
-system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes.
-system.disk0.dma_write_txs 412 # Number of DMA write transactions.
+system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
+system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
+system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
index b3b3e8704..bb7f4ca1e 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 5 2006 22:13:02
-M5 started Fri Oct 6 00:23:19 2006
-M5 executing on zizzer.eecs.umich.edu
+M5 compiled Oct 8 2006 21:57:24
+M5 started Sun Oct 8 21:57:28 2006
+M5 executing on zed.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic
-Exiting @ tick 3481726167 because m5_exit instruction encountered
+Exiting @ tick 3634179176 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini
index 65401b549..8f75c9525 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini
@@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=TimingSimpleCPU
children=dtb itb
clock=1
-cpu_id=-1
+cpu_id=0
defer_registration=false
dtb=system.cpu0.dtb
function_trace=false
@@ -104,7 +104,7 @@ size=48
type=TimingSimpleCPU
children=dtb itb
clock=1
-cpu_id=-1
+cpu_id=1
defer_registration=false
dtb=system.cpu1.dtb
function_trace=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out
index ed03e445d..9e0948f1e 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out
@@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
itb=system.cpu0.itb
dtb=system.cpu0.dtb
-cpu_id=-1
profile=0
clock=1
defer_registration=false
@@ -118,9 +118,9 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=1
itb=system.cpu1.itb
dtb=system.cpu1.dtb
-cpu_id=-1
profile=0
clock=1
defer_registration=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
index 4a397ddbf..27adebb82 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
- kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
+ kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 2 processor(s)
KSP: 0x20043FE8 PTBR 0x20
@@ -16,29 +16,27 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Bootstraping CPU 1 with sp=0xFFFFFC0000076000
unix_boot_mem ends at FFFFFC0000078000
k_argc = 0
- jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028)
- CallbackFixup 0 18000, t7=FFFFFC0000700000
+ jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
+ CallbackFixup 0 18000, t7=FFFFFC000070C000
Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400
- Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005
+ Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384
- freeing pages 1030:16384
- reserving pages 1030:1031
+ freeing pages 1069:16384
+ reserving pages 1069:1070
SMP: 2 CPUs probed -- cpu_present_mask = 3
Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0
- PID hash table entries: 1024 (order 10: 16384 bytes)
+ PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900
Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
- Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init)
- Mount-cache hash table entries: 512 (order: 0, 8192 bytes)
- per-CPU timeslice cutoff: 374.49 usecs.
- task migration cache decay timeout: 0 msecs.
+ Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init)
+ Mount-cache hash table entries: 512
SMP starting up secondaries.
Slave CPU 1 console command START
SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb FFFFFC0000018400 my_rpb_phys 18400
@@ -53,16 +51,21 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12
- Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled
+ Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
+ io scheduler noop registered
+ io scheduler anticipatory registered
+ io scheduler deadline registered
+ io scheduler cfq registered
loop: loaded (max 8 devices)
- Using anticipatory io scheduler
nbd: registered device at major 43
- sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver
- eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg
+ eth0: using 64 bit addressing.
+ eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
+ tun: Universal TUN/TAP device driver, 1.6
+ tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0
@@ -75,24 +78,23 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
+ hda: cache flushes not supported
hda: hda1
hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
+ hdb: cache flushes not supported
hdb: unknown partition table
- scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
- Vendor: Linux Model: scsi_m5 Li Rev: 0004
- Type: Direct-Access ANSI SCSI revision: 03
- SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
- SCSI device sda: drive cache: write back
- sda: unknown partition table
- Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2
- IP: routing cache hash table of 1024 buckets, 16Kbytes
- TCP: Hash tables configured (established 8192 bind 8192)
- ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack
+ IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
+ TCP established hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP: Hash tables configured (established 16384 bind 16384)
+ TCP reno registered
+ ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller
+ TCP bic registered
Initializing IPsec netlink socket
NET: Registered protocol family 1
NET: Registered protocol family 17
@@ -101,7 +103,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly.
- Freeing unused kernel memory: 480k freed
+ Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems...
loading script...
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
index bf7320067..ff9a06cc7 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
@@ -1,232 +1,231 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 825990 # Simulator instruction rate (inst/s)
-host_mem_usage 193572 # Number of bytes of host memory used
-host_seconds 74.01 # Real time elapsed on the host
-host_tick_rate 47654938 # Simulator tick rate (ticks/s)
+host_inst_rate 719379 # Simulator instruction rate (inst/s)
+host_mem_usage 197268 # Number of bytes of host memory used
+host_seconds 92.21 # Real time elapsed on the host
+host_tick_rate 40502079 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 61131962 # Number of instructions simulated
-sim_seconds 1.763494 # Number of seconds simulated
-sim_ticks 3526987181 # Number of ticks simulated
-system.cpu0.dtb.accesses 1987164 # DTB accesses
-system.cpu0.dtb.acv 291 # DTB access violations
-system.cpu0.dtb.hits 10431590 # DTB hits
-system.cpu0.dtb.misses 9590 # DTB misses
-system.cpu0.dtb.read_accesses 606328 # DTB read accesses
-system.cpu0.dtb.read_acv 174 # DTB read access violations
-system.cpu0.dtb.read_hits 5831565 # DTB read hits
-system.cpu0.dtb.read_misses 7663 # DTB read misses
-system.cpu0.dtb.write_accesses 1380836 # DTB write accesses
-system.cpu0.dtb.write_acv 117 # DTB write access violations
-system.cpu0.dtb.write_hits 4600025 # DTB write hits
-system.cpu0.dtb.write_misses 1927 # DTB write misses
-system.cpu0.idle_fraction 0.984514 # Percentage of idle cycles
-system.cpu0.itb.accesses 2372045 # ITB accesses
-system.cpu0.itb.acv 143 # ITB acv
-system.cpu0.itb.hits 2368331 # ITB hits
-system.cpu0.itb.misses 3714 # ITB misses
-system.cpu0.kern.callpal 145084 # number of callpals executed
+sim_insts 66337257 # Number of instructions simulated
+sim_seconds 1.867449 # Number of seconds simulated
+sim_ticks 3734898877 # Number of ticks simulated
+system.cpu0.dtb.accesses 828318 # DTB accesses
+system.cpu0.dtb.acv 315 # DTB access violations
+system.cpu0.dtb.hits 13264910 # DTB hits
+system.cpu0.dtb.misses 7094 # DTB misses
+system.cpu0.dtb.read_accesses 572336 # DTB read accesses
+system.cpu0.dtb.read_acv 200 # DTB read access violations
+system.cpu0.dtb.read_hits 8201218 # DTB read hits
+system.cpu0.dtb.read_misses 6394 # DTB read misses
+system.cpu0.dtb.write_accesses 255982 # DTB write accesses
+system.cpu0.dtb.write_acv 115 # DTB write access violations
+system.cpu0.dtb.write_hits 5063692 # DTB write hits
+system.cpu0.dtb.write_misses 700 # DTB write misses
+system.cpu0.idle_fraction 0.982517 # Percentage of idle cycles
+system.cpu0.itb.accesses 1888651 # ITB accesses
+system.cpu0.itb.acv 166 # ITB acv
+system.cpu0.itb.hits 1885318 # ITB hits
+system.cpu0.itb.misses 3333 # ITB misses
+system.cpu0.kern.callpal 146863 # number of callpals executed
system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu0.kern.callpal_wripir 54 0.04% 0.04% # number of callpals executed
-system.cpu0.kern.callpal_wrmces 1 0.00% 0.04% # number of callpals executed
-system.cpu0.kern.callpal_wrfen 1 0.00% 0.04% # number of callpals executed
-system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.04% # number of callpals executed
-system.cpu0.kern.callpal_swpctx 1182 0.81% 0.85% # number of callpals executed
-system.cpu0.kern.callpal_tbi 42 0.03% 0.88% # number of callpals executed
-system.cpu0.kern.callpal_wrent 7 0.00% 0.89% # number of callpals executed
-system.cpu0.kern.callpal_swpipl 135050 93.08% 93.97% # number of callpals executed
-system.cpu0.kern.callpal_rdps 4795 3.30% 97.28% # number of callpals executed
-system.cpu0.kern.callpal_wrkgp 1 0.00% 97.28% # number of callpals executed
-system.cpu0.kern.callpal_wrusp 5 0.00% 97.28% # number of callpals executed
-system.cpu0.kern.callpal_rdusp 8 0.01% 97.29% # number of callpals executed
-system.cpu0.kern.callpal_whami 2 0.00% 97.29% # number of callpals executed
-system.cpu0.kern.callpal_rti 3431 2.36% 99.65% # number of callpals executed
-system.cpu0.kern.callpal_callsys 364 0.25% 99.90% # number of callpals executed
-system.cpu0.kern.callpal_imb 139 0.10% 100.00% # number of callpals executed
+system.cpu0.kern.callpal_wripir 506 0.34% 0.35% # number of callpals executed
+system.cpu0.kern.callpal_wrmces 1 0.00% 0.35% # number of callpals executed
+system.cpu0.kern.callpal_wrfen 1 0.00% 0.35% # number of callpals executed
+system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.35% # number of callpals executed
+system.cpu0.kern.callpal_swpctx 2962 2.02% 2.36% # number of callpals executed
+system.cpu0.kern.callpal_tbi 47 0.03% 2.40% # number of callpals executed
+system.cpu0.kern.callpal_wrent 7 0.00% 2.40% # number of callpals executed
+system.cpu0.kern.callpal_swpipl 132443 90.18% 92.58% # number of callpals executed
+system.cpu0.kern.callpal_rdps 6236 4.25% 96.83% # number of callpals executed
+system.cpu0.kern.callpal_wrkgp 1 0.00% 96.83% # number of callpals executed
+system.cpu0.kern.callpal_wrusp 2 0.00% 96.83% # number of callpals executed
+system.cpu0.kern.callpal_rdusp 8 0.01% 96.84% # number of callpals executed
+system.cpu0.kern.callpal_whami 2 0.00% 96.84% # number of callpals executed
+system.cpu0.kern.callpal_rti 4200 2.86% 99.70% # number of callpals executed
+system.cpu0.kern.callpal_callsys 317 0.22% 99.91% # number of callpals executed
+system.cpu0.kern.callpal_imb 128 0.09% 100.00% # number of callpals executed
system.cpu0.kern.inst.arm 0 # number of arm instructions executed
-system.cpu0.kern.inst.hwrei 160926 # number of hwrei instructions executed
+system.cpu0.kern.inst.hwrei 160332 # number of hwrei instructions executed
system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu0.kern.inst.quiesce 1958 # number of quiesce instructions executed
-system.cpu0.kern.ipl_count 140584 # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_0 56549 40.22% 40.22% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_21 251 0.18% 40.40% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_22 5487 3.90% 44.31% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_30 51 0.04% 44.34% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_31 78246 55.66% 100.00% # number of times we switched to this ipl
-system.cpu0.kern.ipl_good 122461 # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_0 56518 46.15% 46.15% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_21 251 0.20% 46.36% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_22 5487 4.48% 50.84% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_30 51 0.04% 50.88% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_31 60154 49.12% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_ticks 3526986735 # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_0 3501352281 99.27% 99.27% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_21 53019 0.00% 99.27% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_22 1348211 0.04% 99.31% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_30 18326 0.00% 99.31% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_31 24214898 0.69% 100.00% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_used 0.871088 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_0 0.999452 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.inst.quiesce 6637 # number of quiesce instructions executed
+system.cpu0.kern.ipl_count 139203 # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_0 55744 40.05% 40.05% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_21 245 0.18% 40.22% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_22 1904 1.37% 41.59% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_30 410 0.29% 41.88% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_31 80900 58.12% 100.00% # number of times we switched to this ipl
+system.cpu0.kern.ipl_good 112527 # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_0 55189 49.05% 49.05% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_21 245 0.22% 49.26% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_22 1904 1.69% 50.95% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_30 410 0.36% 51.32% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_31 54779 48.68% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_ticks 3734378988 # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_0 3696326531 98.98% 98.98% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_21 53683 0.00% 98.98% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_22 224672 0.01% 98.99% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_30 128286 0.00% 98.99% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_31 37645816 1.01% 100.00% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_used 0.808366 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_0 0.990044 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_31 0.768781 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.mode_good_kernel 1448
-system.cpu0.kern.mode_good_user 1300
-system.cpu0.kern.mode_good_idle 148
-system.cpu0.kern.mode_switch_kernel 2490 # number of protection mode switches
-system.cpu0.kern.mode_switch_user 1300 # number of protection mode switches
-system.cpu0.kern.mode_switch_idle 2110 # number of protection mode switches
-system.cpu0.kern.mode_switch_good 0.490847 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_kernel 0.581526 # fraction of useful protection mode switches
+system.cpu0.kern.ipl_used_31 0.677120 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.mode_good_kernel 1095
+system.cpu0.kern.mode_good_user 1095
+system.cpu0.kern.mode_good_idle 0
+system.cpu0.kern.mode_switch_kernel 6628 # number of protection mode switches
+system.cpu0.kern.mode_switch_user 1095 # number of protection mode switches
+system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
+system.cpu0.kern.mode_switch_good 0.283569 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_kernel 0.165208 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_idle 0.070142 # fraction of useful protection mode switches
-system.cpu0.kern.mode_ticks_kernel 23256451 0.66% 0.66% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_user 3397192 0.10% 0.76% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_idle 3500333090 99.24% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.swap_context 1183 # number of times the context was actually changed
-system.cpu0.kern.syscall 231 # number of syscalls executed
-system.cpu0.kern.syscall_fork 6 2.60% 2.60% # number of syscalls executed
-system.cpu0.kern.syscall_read 17 7.36% 9.96% # number of syscalls executed
-system.cpu0.kern.syscall_write 4 1.73% 11.69% # number of syscalls executed
-system.cpu0.kern.syscall_close 31 13.42% 25.11% # number of syscalls executed
-system.cpu0.kern.syscall_chdir 1 0.43% 25.54% # number of syscalls executed
-system.cpu0.kern.syscall_obreak 11 4.76% 30.30% # number of syscalls executed
-system.cpu0.kern.syscall_lseek 6 2.60% 32.90% # number of syscalls executed
-system.cpu0.kern.syscall_getpid 4 1.73% 34.63% # number of syscalls executed
-system.cpu0.kern.syscall_setuid 2 0.87% 35.50% # number of syscalls executed
-system.cpu0.kern.syscall_getuid 4 1.73% 37.23% # number of syscalls executed
-system.cpu0.kern.syscall_access 9 3.90% 41.13% # number of syscalls executed
-system.cpu0.kern.syscall_dup 2 0.87% 41.99% # number of syscalls executed
-system.cpu0.kern.syscall_open 42 18.18% 60.17% # number of syscalls executed
-system.cpu0.kern.syscall_getgid 4 1.73% 61.90% # number of syscalls executed
-system.cpu0.kern.syscall_sigprocmask 7 3.03% 64.94% # number of syscalls executed
-system.cpu0.kern.syscall_ioctl 9 3.90% 68.83% # number of syscalls executed
-system.cpu0.kern.syscall_readlink 1 0.43% 69.26% # number of syscalls executed
-system.cpu0.kern.syscall_execve 4 1.73% 71.00% # number of syscalls executed
-system.cpu0.kern.syscall_mmap 35 15.15% 86.15% # number of syscalls executed
-system.cpu0.kern.syscall_munmap 2 0.87% 87.01% # number of syscalls executed
-system.cpu0.kern.syscall_mprotect 10 4.33% 91.34% # number of syscalls executed
-system.cpu0.kern.syscall_gethostname 1 0.43% 91.77% # number of syscalls executed
-system.cpu0.kern.syscall_dup2 2 0.87% 92.64% # number of syscalls executed
-system.cpu0.kern.syscall_fcntl 8 3.46% 96.10% # number of syscalls executed
-system.cpu0.kern.syscall_socket 2 0.87% 96.97% # number of syscalls executed
-system.cpu0.kern.syscall_connect 2 0.87% 97.84% # number of syscalls executed
-system.cpu0.kern.syscall_setgid 2 0.87% 98.70% # number of syscalls executed
-system.cpu0.kern.syscall_getrlimit 1 0.43% 99.13% # number of syscalls executed
-system.cpu0.kern.syscall_setsid 2 0.87% 100.00% # number of syscalls executed
-system.cpu0.not_idle_fraction 0.015486 # Percentage of non-idle cycles
+system.cpu0.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
+system.cpu0.kern.mode_ticks_kernel 3730042316 99.93% 99.93% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_user 2718822 0.07% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.swap_context 2963 # number of times the context was actually changed
+system.cpu0.kern.syscall 179 # number of syscalls executed
+system.cpu0.kern.syscall_fork 7 3.91% 3.91% # number of syscalls executed
+system.cpu0.kern.syscall_read 14 7.82% 11.73% # number of syscalls executed
+system.cpu0.kern.syscall_write 4 2.23% 13.97% # number of syscalls executed
+system.cpu0.kern.syscall_close 27 15.08% 29.05% # number of syscalls executed
+system.cpu0.kern.syscall_chdir 1 0.56% 29.61% # number of syscalls executed
+system.cpu0.kern.syscall_obreak 6 3.35% 32.96% # number of syscalls executed
+system.cpu0.kern.syscall_lseek 7 3.91% 36.87% # number of syscalls executed
+system.cpu0.kern.syscall_getpid 4 2.23% 39.11% # number of syscalls executed
+system.cpu0.kern.syscall_setuid 1 0.56% 39.66% # number of syscalls executed
+system.cpu0.kern.syscall_getuid 3 1.68% 41.34% # number of syscalls executed
+system.cpu0.kern.syscall_access 6 3.35% 44.69% # number of syscalls executed
+system.cpu0.kern.syscall_dup 2 1.12% 45.81% # number of syscalls executed
+system.cpu0.kern.syscall_open 30 16.76% 62.57% # number of syscalls executed
+system.cpu0.kern.syscall_getgid 3 1.68% 64.25% # number of syscalls executed
+system.cpu0.kern.syscall_sigprocmask 8 4.47% 68.72% # number of syscalls executed
+system.cpu0.kern.syscall_ioctl 8 4.47% 73.18% # number of syscalls executed
+system.cpu0.kern.syscall_execve 5 2.79% 75.98% # number of syscalls executed
+system.cpu0.kern.syscall_mmap 17 9.50% 85.47% # number of syscalls executed
+system.cpu0.kern.syscall_munmap 3 1.68% 87.15% # number of syscalls executed
+system.cpu0.kern.syscall_mprotect 4 2.23% 89.39% # number of syscalls executed
+system.cpu0.kern.syscall_gethostname 1 0.56% 89.94% # number of syscalls executed
+system.cpu0.kern.syscall_dup2 2 1.12% 91.06% # number of syscalls executed
+system.cpu0.kern.syscall_fcntl 8 4.47% 95.53% # number of syscalls executed
+system.cpu0.kern.syscall_socket 2 1.12% 96.65% # number of syscalls executed
+system.cpu0.kern.syscall_connect 2 1.12% 97.77% # number of syscalls executed
+system.cpu0.kern.syscall_setgid 1 0.56% 98.32% # number of syscalls executed
+system.cpu0.kern.syscall_getrlimit 1 0.56% 98.88% # number of syscalls executed
+system.cpu0.kern.syscall_setsid 2 1.12% 100.00% # number of syscalls executed
+system.cpu0.not_idle_fraction 0.017483 # Percentage of non-idle cycles
system.cpu0.numCycles 0 # number of cpu cycles simulated
-system.cpu0.num_insts 44155958 # Number of instructions executed
-system.cpu0.num_refs 10463340 # Number of memory references
-system.cpu1.dtb.accesses 323344 # DTB accesses
-system.cpu1.dtb.acv 82 # DTB access violations
-system.cpu1.dtb.hits 4234985 # DTB hits
-system.cpu1.dtb.misses 2977 # DTB misses
-system.cpu1.dtb.read_accesses 222873 # DTB read accesses
-system.cpu1.dtb.read_acv 36 # DTB read access violations
-system.cpu1.dtb.read_hits 2431648 # DTB read hits
-system.cpu1.dtb.read_misses 2698 # DTB read misses
-system.cpu1.dtb.write_accesses 100471 # DTB write accesses
-system.cpu1.dtb.write_acv 46 # DTB write access violations
-system.cpu1.dtb.write_hits 1803337 # DTB write hits
-system.cpu1.dtb.write_misses 279 # DTB write misses
-system.cpu1.idle_fraction 0.993979 # Percentage of idle cycles
-system.cpu1.itb.accesses 912010 # ITB accesses
-system.cpu1.itb.acv 41 # ITB acv
-system.cpu1.itb.hits 910678 # ITB hits
-system.cpu1.itb.misses 1332 # ITB misses
-system.cpu1.kern.callpal 57529 # number of callpals executed
+system.cpu0.num_insts 51973218 # Number of instructions executed
+system.cpu0.num_refs 13496062 # Number of memory references
+system.cpu1.dtb.accesses 477041 # DTB accesses
+system.cpu1.dtb.acv 52 # DTB access violations
+system.cpu1.dtb.hits 4561390 # DTB hits
+system.cpu1.dtb.misses 4359 # DTB misses
+system.cpu1.dtb.read_accesses 328551 # DTB read accesses
+system.cpu1.dtb.read_acv 10 # DTB read access violations
+system.cpu1.dtb.read_hits 2657400 # DTB read hits
+system.cpu1.dtb.read_misses 3911 # DTB read misses
+system.cpu1.dtb.write_accesses 148490 # DTB write accesses
+system.cpu1.dtb.write_acv 42 # DTB write access violations
+system.cpu1.dtb.write_hits 1903990 # DTB write hits
+system.cpu1.dtb.write_misses 448 # DTB write misses
+system.cpu1.idle_fraction 0.994927 # Percentage of idle cycles
+system.cpu1.itb.accesses 1392687 # ITB accesses
+system.cpu1.itb.acv 18 # ITB acv
+system.cpu1.itb.hits 1391015 # ITB hits
+system.cpu1.itb.misses 1672 # ITB misses
+system.cpu1.kern.callpal 74370 # number of callpals executed
system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu1.kern.callpal_wripir 51 0.09% 0.09% # number of callpals executed
-system.cpu1.kern.callpal_wrmces 1 0.00% 0.09% # number of callpals executed
-system.cpu1.kern.callpal_wrfen 1 0.00% 0.09% # number of callpals executed
-system.cpu1.kern.callpal_swpctx 451 0.78% 0.88% # number of callpals executed
-system.cpu1.kern.callpal_tbi 12 0.02% 0.90% # number of callpals executed
-system.cpu1.kern.callpal_wrent 7 0.01% 0.91% # number of callpals executed
-system.cpu1.kern.callpal_swpipl 54081 94.01% 94.92% # number of callpals executed
-system.cpu1.kern.callpal_rdps 368 0.64% 95.56% # number of callpals executed
-system.cpu1.kern.callpal_wrkgp 1 0.00% 95.56% # number of callpals executed
-system.cpu1.kern.callpal_wrusp 2 0.00% 95.56% # number of callpals executed
-system.cpu1.kern.callpal_rdusp 2 0.00% 95.57% # number of callpals executed
-system.cpu1.kern.callpal_whami 3 0.01% 95.57% # number of callpals executed
-system.cpu1.kern.callpal_rti 2337 4.06% 99.63% # number of callpals executed
-system.cpu1.kern.callpal_callsys 169 0.29% 99.93% # number of callpals executed
-system.cpu1.kern.callpal_imb 41 0.07% 100.00% # number of callpals executed
+system.cpu1.kern.callpal_wripir 410 0.55% 0.55% # number of callpals executed
+system.cpu1.kern.callpal_wrmces 1 0.00% 0.55% # number of callpals executed
+system.cpu1.kern.callpal_wrfen 1 0.00% 0.56% # number of callpals executed
+system.cpu1.kern.callpal_swpctx 2102 2.83% 3.38% # number of callpals executed
+system.cpu1.kern.callpal_tbi 6 0.01% 3.39% # number of callpals executed
+system.cpu1.kern.callpal_wrent 7 0.01% 3.40% # number of callpals executed
+system.cpu1.kern.callpal_swpipl 65072 87.50% 90.90% # number of callpals executed
+system.cpu1.kern.callpal_rdps 2603 3.50% 94.40% # number of callpals executed
+system.cpu1.kern.callpal_wrkgp 1 0.00% 94.40% # number of callpals executed
+system.cpu1.kern.callpal_wrusp 5 0.01% 94.41% # number of callpals executed
+system.cpu1.kern.callpal_rdusp 1 0.00% 94.41% # number of callpals executed
+system.cpu1.kern.callpal_whami 3 0.00% 94.41% # number of callpals executed
+system.cpu1.kern.callpal_rti 3890 5.23% 99.64% # number of callpals executed
+system.cpu1.kern.callpal_callsys 214 0.29% 99.93% # number of callpals executed
+system.cpu1.kern.callpal_imb 52 0.07% 100.00% # number of callpals executed
system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
system.cpu1.kern.inst.arm 0 # number of arm instructions executed
-system.cpu1.kern.inst.hwrei 63811 # number of hwrei instructions executed
+system.cpu1.kern.inst.hwrei 82881 # number of hwrei instructions executed
system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu1.kern.inst.quiesce 1898 # number of quiesce instructions executed
-system.cpu1.kern.ipl_count 58267 # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_0 25040 42.97% 42.97% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_22 5452 9.36% 52.33% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_30 54 0.09% 52.42% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_31 27721 47.58% 100.00% # number of times we switched to this ipl
-system.cpu1.kern.ipl_good 57331 # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_0 25007 43.62% 43.62% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_22 5452 9.51% 53.13% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_30 54 0.09% 53.22% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_31 26818 46.78% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_ticks 3526422675 # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_0 3497592433 99.18% 99.18% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_22 1410084 0.04% 99.22% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_30 19740 0.00% 99.22% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_31 27400418 0.78% 100.00% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_used 0.983936 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_0 0.998682 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.inst.quiesce 2511 # number of quiesce instructions executed
+system.cpu1.kern.ipl_count 71371 # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_0 27750 38.88% 38.88% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_22 1902 2.66% 41.55% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_30 506 0.71% 42.26% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_31 41213 57.74% 100.00% # number of times we switched to this ipl
+system.cpu1.kern.ipl_good 55758 # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_0 26928 48.29% 48.29% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_22 1902 3.41% 51.71% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_30 506 0.91% 52.61% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_31 26422 47.39% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_ticks 3734898431 # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_0 3704872588 99.20% 99.20% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_22 224436 0.01% 99.20% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_30 162482 0.00% 99.21% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_31 29638925 0.79% 100.00% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_used 0.781242 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.ipl_used_0 0.970378 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_31 0.967425 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.mode_good_kernel 465
-system.cpu1.kern.mode_good_user 465
-system.cpu1.kern.mode_good_idle 0
-system.cpu1.kern.mode_switch_kernel 2771 # number of protection mode switches
-system.cpu1.kern.mode_switch_user 465 # number of protection mode switches
-system.cpu1.kern.mode_switch_idle 0 # number of protection mode switches
-system.cpu1.kern.mode_switch_good 0.287392 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_kernel 0.167809 # fraction of useful protection mode switches
+system.cpu1.kern.ipl_used_31 0.641108 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.mode_good_kernel 1093
+system.cpu1.kern.mode_good_user 662
+system.cpu1.kern.mode_good_idle 431
+system.cpu1.kern.mode_switch_kernel 2354 # number of protection mode switches
+system.cpu1.kern.mode_switch_user 662 # number of protection mode switches
+system.cpu1.kern.mode_switch_idle 2830 # number of protection mode switches
+system.cpu1.kern.mode_switch_good 0.373931 # fraction of useful protection mode switches
+system.cpu1.kern.mode_switch_good_kernel 0.464316 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_idle no value # fraction of useful protection mode switches
-system.cpu1.kern.mode_ticks_kernel 3525066043 99.96% 99.96% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_user 1294184 0.04% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.swap_context 452 # number of times the context was actually changed
-system.cpu1.kern.syscall 98 # number of syscalls executed
-system.cpu1.kern.syscall_fork 2 2.04% 2.04% # number of syscalls executed
-system.cpu1.kern.syscall_read 13 13.27% 15.31% # number of syscalls executed
-system.cpu1.kern.syscall_close 12 12.24% 27.55% # number of syscalls executed
-system.cpu1.kern.syscall_chmod 1 1.02% 28.57% # number of syscalls executed
-system.cpu1.kern.syscall_obreak 4 4.08% 32.65% # number of syscalls executed
-system.cpu1.kern.syscall_lseek 4 4.08% 36.73% # number of syscalls executed
-system.cpu1.kern.syscall_getpid 2 2.04% 38.78% # number of syscalls executed
-system.cpu1.kern.syscall_setuid 2 2.04% 40.82% # number of syscalls executed
-system.cpu1.kern.syscall_getuid 2 2.04% 42.86% # number of syscalls executed
-system.cpu1.kern.syscall_access 2 2.04% 44.90% # number of syscalls executed
-system.cpu1.kern.syscall_open 13 13.27% 58.16% # number of syscalls executed
-system.cpu1.kern.syscall_getgid 2 2.04% 60.20% # number of syscalls executed
-system.cpu1.kern.syscall_sigprocmask 3 3.06% 63.27% # number of syscalls executed
-system.cpu1.kern.syscall_ioctl 1 1.02% 64.29% # number of syscalls executed
-system.cpu1.kern.syscall_execve 3 3.06% 67.35% # number of syscalls executed
-system.cpu1.kern.syscall_mmap 19 19.39% 86.73% # number of syscalls executed
-system.cpu1.kern.syscall_munmap 1 1.02% 87.76% # number of syscalls executed
-system.cpu1.kern.syscall_mprotect 6 6.12% 93.88% # number of syscalls executed
-system.cpu1.kern.syscall_dup2 1 1.02% 94.90% # number of syscalls executed
-system.cpu1.kern.syscall_fcntl 2 2.04% 96.94% # number of syscalls executed
-system.cpu1.kern.syscall_setgid 2 2.04% 98.98% # number of syscalls executed
-system.cpu1.kern.syscall_getrlimit 1 1.02% 100.00% # number of syscalls executed
-system.cpu1.not_idle_fraction 0.006021 # Percentage of non-idle cycles
+system.cpu1.kern.mode_switch_good_idle 0.152297 # fraction of useful protection mode switches
+system.cpu1.kern.mode_ticks_kernel 13359666 0.36% 0.36% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_user 1967356 0.05% 0.41% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_idle 3719571407 99.59% 100.00% # number of ticks spent at the given mode
+system.cpu1.kern.swap_context 2103 # number of times the context was actually changed
+system.cpu1.kern.syscall 150 # number of syscalls executed
+system.cpu1.kern.syscall_fork 1 0.67% 0.67% # number of syscalls executed
+system.cpu1.kern.syscall_read 16 10.67% 11.33% # number of syscalls executed
+system.cpu1.kern.syscall_close 16 10.67% 22.00% # number of syscalls executed
+system.cpu1.kern.syscall_chmod 1 0.67% 22.67% # number of syscalls executed
+system.cpu1.kern.syscall_obreak 9 6.00% 28.67% # number of syscalls executed
+system.cpu1.kern.syscall_lseek 3 2.00% 30.67% # number of syscalls executed
+system.cpu1.kern.syscall_getpid 2 1.33% 32.00% # number of syscalls executed
+system.cpu1.kern.syscall_setuid 3 2.00% 34.00% # number of syscalls executed
+system.cpu1.kern.syscall_getuid 3 2.00% 36.00% # number of syscalls executed
+system.cpu1.kern.syscall_access 5 3.33% 39.33% # number of syscalls executed
+system.cpu1.kern.syscall_open 25 16.67% 56.00% # number of syscalls executed
+system.cpu1.kern.syscall_getgid 3 2.00% 58.00% # number of syscalls executed
+system.cpu1.kern.syscall_sigprocmask 2 1.33% 59.33% # number of syscalls executed
+system.cpu1.kern.syscall_ioctl 2 1.33% 60.67% # number of syscalls executed
+system.cpu1.kern.syscall_readlink 1 0.67% 61.33% # number of syscalls executed
+system.cpu1.kern.syscall_execve 2 1.33% 62.67% # number of syscalls executed
+system.cpu1.kern.syscall_mmap 37 24.67% 87.33% # number of syscalls executed
+system.cpu1.kern.syscall_mprotect 12 8.00% 95.33% # number of syscalls executed
+system.cpu1.kern.syscall_dup2 1 0.67% 96.00% # number of syscalls executed
+system.cpu1.kern.syscall_fcntl 2 1.33% 97.33% # number of syscalls executed
+system.cpu1.kern.syscall_setgid 3 2.00% 99.33% # number of syscalls executed
+system.cpu1.kern.syscall_getrlimit 1 0.67% 100.00% # number of syscalls executed
+system.cpu1.not_idle_fraction 0.005073 # Percentage of non-idle cycles
system.cpu1.numCycles 0 # number of cpu cycles simulated
-system.cpu1.num_insts 16976004 # Number of instructions executed
-system.cpu1.num_refs 4251312 # Number of memory references
+system.cpu1.num_insts 14364039 # Number of instructions executed
+system.cpu1.num_refs 4590544 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
-system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes.
-system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes.
-system.disk0.dma_write_txs 412 # Number of DMA write transactions.
+system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
+system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
+system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).
@@ -235,7 +234,7 @@ system.disk2.dma_write_full_pages 1 # Nu
system.disk2.dma_write_txs 1 # Number of DMA write transactions.
system.tsunami.ethernet.coalescedRxDesc <err: div-0> # average number of RxDesc's coalesced into each post
system.tsunami.ethernet.coalescedRxIdle <err: div-0> # average number of RxIdle's coalesced into each post
-system.tsunami.ethernet.coalescedRxOk <err: div-0> # average number of RxOk's coalesced into each post
+system.tsunami.ethernet.coalescedRxOk no value # average number of RxOk's coalesced into each post
system.tsunami.ethernet.coalescedRxOrn <err: div-0> # average number of RxOrn's coalesced into each post
system.tsunami.ethernet.coalescedSwi <err: div-0> # average number of Swi's coalesced into each post
system.tsunami.ethernet.coalescedTotal <err: div-0> # average number of interrupts coalesced into each post
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr
index 2191bd088..c8703fde1 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr
@@ -3,4 +3,4 @@ Listening for console connection on port 3456
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001
warn: Entering event queue @ 0. Starting simulation...
-warn: 271342: Trying to launch CPU number 1!
+warn: 271343: Trying to launch CPU number 1!
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout
index 2c496b914..498a94b6f 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 5 2006 22:13:02
-M5 started Fri Oct 6 00:26:09 2006
-M5 executing on zizzer.eecs.umich.edu
+M5 compiled Oct 8 2006 21:57:24
+M5 started Sun Oct 8 22:00:29 2006
+M5 executing on zed.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual
-Exiting @ tick 3526987181 because m5_exit instruction encountered
+Exiting @ tick 3734898877 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini
index 7f27ca121..21d606051 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini
@@ -75,7 +75,7 @@ side_b=system.membus.port[0]
type=TimingSimpleCPU
children=dtb itb
clock=1
-cpu_id=-1
+cpu_id=0
defer_registration=false
dtb=system.cpu.dtb
function_trace=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out
index deba80368..73f9edaea 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out
@@ -90,9 +90,9 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
itb=system.cpu.itb
dtb=system.cpu.dtb
-cpu_id=-1
profile=0
clock=1
defer_registration=false
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console
index d6e3955cc..5461cc4ab 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
memsize 8000000 pages 4000
First free page after ROM 0xFFFFFC0000018000
HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
- kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC00008064E8, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
+ kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
CPU Clock at 2000 MHz IntrClockFrequency=1024
Booting with 1 processor(s)
KSP: 0x20043FE8 PTBR 0x20
@@ -14,28 +14,26 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ConsoleDispatch at virt 10000658 phys 18658 val FFFFFC00000100A8
unix_boot_mem ends at FFFFFC0000076000
k_argc = 0
- jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1028)
- CallbackFixup 0 18000, t7=FFFFFC0000700000
- Linux version 2.6.8.1 (binkertn@ziff.eecs.umich.edu) (gcc version 3.4.3) #36 SMP Mon May 2 19:50:53 EDT 2005
+ jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
+ CallbackFixup 0 18000, t7=FFFFFC000070C000
+ Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK
Command line: root=/dev/hda1 console=ttyS0
memcluster 0, usage 1, start 0, end 392
memcluster 1, usage 0, start 392, end 16384
- freeing pages 1030:16384
- reserving pages 1030:1031
+ freeing pages 1069:16384
+ reserving pages 1069:1070
SMP: 1 CPUs probed -- cpu_present_mask = 1
Built 1 zonelists
Kernel command line: root=/dev/hda1 console=ttyS0
- PID hash table entries: 1024 (order 10: 16384 bytes)
+ PID hash table entries: 1024 (order: 10, 32768 bytes)
Using epoch = 1900
Console: colour dummy device 80x25
Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
- Memory: 119072k/131072k available (3058k kernel code, 8680k reserved, 695k data, 480k init)
- Mount-cache hash table entries: 512 (order: 0, 8192 bytes)
- per-CPU timeslice cutoff: 374.49 usecs.
- task migration cache decay timeout: 0 msecs.
+ Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init)
+ Mount-cache hash table entries: 512
SMP mode deactivated.
Brought up 1 CPUs
SMP: Total of 1 processors activated (4002.20 BogoMIPS).
@@ -48,16 +46,21 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
Initializing Cryptographic API
rtc: Standard PC (1900) epoch (1900) detected
Real Time Clock Driver v1.12
- Serial: 8250/16550 driver $Revision: 1.90 $ 5 ports, IRQ sharing disabled
+ Serial: 8250/16550 driver $Revision: 1.90 $ 1 ports, IRQ sharing disabled
ttyS0 at I/O 0x3f8 (irq = 4) is a 8250
+ io scheduler noop registered
+ io scheduler anticipatory registered
+ io scheduler deadline registered
+ io scheduler cfq registered
loop: loaded (max 8 devices)
- Using anticipatory io scheduler
nbd: registered device at major 43
- sinic.c: M5 Simple Integrated NIC driver
ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
eth0: enabling optical transceiver
- eth0: ns83820 v0.20: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=sg
+ eth0: using 64 bit addressing.
+ eth0: ns83820 v0.22: DP83820 v1.3: 00:90:00:00:00:01 io=0x09000000 irq=30 f=h,sg
+ tun: Universal TUN/TAP device driver, 1.6
+ tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
ide: Assuming 33MHz system bus speed for PIO modes; override with idebus=xx
PIIX4: IDE controller at PCI slot 0000:00:00.0
@@ -70,24 +73,23 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
ide0 at 0x8410-0x8417,0x8422 on irq 31
hda: max request size: 128KiB
hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33)
+ hda: cache flushes not supported
hda: hda1
hdb: max request size: 128KiB
hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33)
+ hdb: cache flushes not supported
hdb: unknown partition table
- scsi0 : scsi_m5, version 1.73 [20040518], dev_size_mb=8, opts=0x0
- Vendor: Linux Model: scsi_m5 Li Rev: 0004
- Type: Direct-Access ANSI SCSI revision: 03
- SCSI device sda: 16384 512-byte hdwr sectors (8 MB)
- SCSI device sda: drive cache: write back
- sda: unknown partition table
- Attached scsi disk sda at scsi0, channel 0, id 0, lun 0
mice: PS/2 mouse device common for all mice
NET: Registered protocol family 2
- IP: routing cache hash table of 1024 buckets, 16Kbytes
- TCP: Hash tables configured (established 8192 bind 8192)
- ip_conntrack version 2.1 (512 buckets, 4096 max) - 440 bytes per conntrack
+ IP route cache hash table entries: 4096 (order: 2, 32768 bytes)
+ TCP established hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP bind hash table entries: 16384 (order: 5, 262144 bytes)
+ TCP: Hash tables configured (established 16384 bind 16384)
+ TCP reno registered
+ ip_conntrack version 2.1 (512 buckets, 4096 max) - 296 bytes per conntrack
ip_tables: (C) 2000-2002 Netfilter core team
arp_tables: (C) 2002 David S. Miller
+ TCP bic registered
Initializing IPsec netlink socket
NET: Registered protocol family 1
NET: Registered protocol family 17
@@ -96,7 +98,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
802.1Q VLAN Support v1.8 Ben Greear <greearb@candelatech.com>
All bugs added by David S. Miller <davem@redhat.com>
VFS: Mounted root (ext2 filesystem) readonly.
- Freeing unused kernel memory: 480k freed
+ Freeing unused kernel memory: 224k freed
init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary
mounting filesystems...
loading script...
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt
index 1d45d41a9..ba645e5c7 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt
@@ -1,86 +1,86 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 820839 # Simulator instruction rate (inst/s)
-host_mem_usage 193264 # Number of bytes of host memory used
-host_seconds 70.65 # Real time elapsed on the host
-host_tick_rate 49454399 # Simulator tick rate (ticks/s)
+host_inst_rate 740935 # Simulator instruction rate (inst/s)
+host_mem_usage 196820 # Number of bytes of host memory used
+host_seconds 83.36 # Real time elapsed on the host
+host_tick_rate 43810981 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 57989043 # Number of instructions simulated
-sim_seconds 1.746889 # Number of seconds simulated
-sim_ticks 3493777466 # Number of ticks simulated
-system.cpu.dtb.accesses 2309470 # DTB accesses
+sim_insts 61760478 # Number of instructions simulated
+sim_seconds 1.825937 # Number of seconds simulated
+sim_ticks 3651873858 # Number of ticks simulated
+system.cpu.dtb.accesses 1304494 # DTB accesses
system.cpu.dtb.acv 367 # DTB access violations
-system.cpu.dtb.hits 13707871 # DTB hits
-system.cpu.dtb.misses 12493 # DTB misses
-system.cpu.dtb.read_accesses 828530 # DTB read accesses
+system.cpu.dtb.hits 16545335 # DTB hits
+system.cpu.dtb.misses 11425 # DTB misses
+system.cpu.dtb.read_accesses 900425 # DTB read accesses
system.cpu.dtb.read_acv 210 # DTB read access violations
-system.cpu.dtb.read_hits 7595606 # DTB read hits
-system.cpu.dtb.read_misses 10298 # DTB read misses
-system.cpu.dtb.write_accesses 1480940 # DTB write accesses
+system.cpu.dtb.read_hits 10034117 # DTB read hits
+system.cpu.dtb.read_misses 10280 # DTB read misses
+system.cpu.dtb.write_accesses 404069 # DTB write accesses
system.cpu.dtb.write_acv 157 # DTB write access violations
-system.cpu.dtb.write_hits 6112265 # DTB write hits
-system.cpu.dtb.write_misses 2195 # DTB write misses
-system.cpu.idle_fraction 0.979465 # Percentage of idle cycles
-system.cpu.itb.accesses 3281347 # ITB accesses
+system.cpu.dtb.write_hits 6511218 # DTB write hits
+system.cpu.dtb.write_misses 1145 # DTB write misses
+system.cpu.idle_fraction 0.978539 # Percentage of idle cycles
+system.cpu.itb.accesses 3281311 # ITB accesses
system.cpu.itb.acv 184 # ITB acv
-system.cpu.itb.hits 3276357 # ITB hits
+system.cpu.itb.hits 3276321 # ITB hits
system.cpu.itb.misses 4990 # ITB misses
-system.cpu.kern.callpal 182454 # number of callpals executed
+system.cpu.kern.callpal 193987 # number of callpals executed
system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
-system.cpu.kern.callpal_swpctx 1571 0.86% 0.86% # number of callpals executed
-system.cpu.kern.callpal_tbi 54 0.03% 0.89% # number of callpals executed
-system.cpu.kern.callpal_wrent 7 0.00% 0.90% # number of callpals executed
-system.cpu.kern.callpal_swpipl 171092 93.77% 94.67% # number of callpals executed
-system.cpu.kern.callpal_rdps 5160 2.83% 97.50% # number of callpals executed
-system.cpu.kern.callpal_wrkgp 1 0.00% 97.50% # number of callpals executed
-system.cpu.kern.callpal_wrusp 7 0.00% 97.50% # number of callpals executed
-system.cpu.kern.callpal_rdusp 10 0.01% 97.51% # number of callpals executed
-system.cpu.kern.callpal_whami 2 0.00% 97.51% # number of callpals executed
-system.cpu.kern.callpal_rti 3834 2.10% 99.61% # number of callpals executed
-system.cpu.kern.callpal_callsys 531 0.29% 99.90% # number of callpals executed
-system.cpu.kern.callpal_imb 181 0.10% 100.00% # number of callpals executed
+system.cpu.kern.callpal_swpctx 4203 2.17% 2.17% # number of callpals executed
+system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed
+system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed
+system.cpu.kern.callpal_swpipl 176881 91.18% 93.38% # number of callpals executed
+system.cpu.kern.callpal_rdps 6888 3.55% 96.93% # number of callpals executed
+system.cpu.kern.callpal_wrkgp 1 0.00% 96.93% # number of callpals executed
+system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed
+system.cpu.kern.callpal_rti 5219 2.69% 99.63% # number of callpals executed
+system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed
+system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
system.cpu.kern.inst.arm 0 # number of arm instructions executed
-system.cpu.kern.inst.hwrei 202524 # number of hwrei instructions executed
+system.cpu.kern.inst.hwrei 213061 # number of hwrei instructions executed
system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu.kern.inst.quiesce 1876 # number of quiesce instructions executed
-system.cpu.kern.ipl_count 176961 # number of times we switched to this ipl
-system.cpu.kern.ipl_count_0 74471 42.08% 42.08% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_21 251 0.14% 42.23% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_22 5439 3.07% 45.30% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_31 96800 54.70% 100.00% # number of times we switched to this ipl
-system.cpu.kern.ipl_good 158180 # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_0 74417 47.05% 47.05% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_21 251 0.16% 47.20% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_22 5439 3.44% 50.64% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_31 78073 49.36% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_ticks 3493777020 # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_0 3466334940 99.21% 99.21% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_21 53019 0.00% 99.22% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_22 1268195 0.04% 99.25% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_31 26120866 0.75% 100.00% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_used 0.893869 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_0 0.999275 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.inst.quiesce 6207 # number of quiesce instructions executed
+system.cpu.kern.ipl_count 184207 # number of times we switched to this ipl
+system.cpu.kern.ipl_count_0 75390 40.93% 40.93% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_21 245 0.13% 41.06% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_22 1861 1.01% 42.07% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_31 106711 57.93% 100.00% # number of times we switched to this ipl
+system.cpu.kern.ipl_good 150152 # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_0 74023 49.30% 49.30% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_22 1861 1.24% 50.70% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_31 74023 49.30% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_ticks 3651873412 # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_0 3611240657 98.89% 98.89% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_21 53683 0.00% 98.89% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_22 219598 0.01% 98.89% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_31 40359474 1.11% 100.00% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_used 0.815126 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.ipl_used_0 0.981868 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_31 0.806539 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.mode_good_kernel 1938
-system.cpu.kern.mode_good_user 1757
-system.cpu.kern.mode_good_idle 181
-system.cpu.kern.mode_switch_kernel 3323 # number of protection mode switches
-system.cpu.kern.mode_switch_user 1757 # number of protection mode switches
-system.cpu.kern.mode_switch_idle 2060 # number of protection mode switches
-system.cpu.kern.mode_switch_good 0.542857 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_kernel 0.583208 # fraction of useful protection mode switches
+system.cpu.kern.ipl_used_31 0.693677 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.mode_good_kernel 1934
+system.cpu.kern.mode_good_user 1754
+system.cpu.kern.mode_good_idle 180
+system.cpu.kern.mode_switch_kernel 5984 # number of protection mode switches
+system.cpu.kern.mode_switch_user 1754 # number of protection mode switches
+system.cpu.kern.mode_switch_idle 2104 # number of protection mode switches
+system.cpu.kern.mode_switch_good 0.393010 # fraction of useful protection mode switches
+system.cpu.kern.mode_switch_good_kernel 0.323195 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_idle 0.087864 # fraction of useful protection mode switches
-system.cpu.kern.mode_ticks_kernel 39254786 1.12% 1.12% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_user 4685669 0.13% 1.26% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_idle 3449836563 98.74% 100.00% # number of ticks spent at the given mode
-system.cpu.kern.swap_context 1572 # number of times the context was actually changed
+system.cpu.kern.mode_switch_good_idle 0.085551 # fraction of useful protection mode switches
+system.cpu.kern.mode_ticks_kernel 58926919 1.61% 1.61% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_user 4685602 0.13% 1.74% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_idle 3588260889 98.26% 100.00% # number of ticks spent at the given mode
+system.cpu.kern.swap_context 4204 # number of times the context was actually changed
system.cpu.kern.syscall 329 # number of syscalls executed
system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed
system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed
@@ -112,16 +112,16 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu
system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed
system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed
system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed
-system.cpu.not_idle_fraction 0.020535 # Percentage of non-idle cycles
+system.cpu.not_idle_fraction 0.021461 # Percentage of non-idle cycles
system.cpu.numCycles 0 # number of cpu cycles simulated
-system.cpu.num_insts 57989043 # Number of instructions executed
-system.cpu.num_refs 13753099 # Number of memory references
+system.cpu.num_insts 61760478 # Number of instructions executed
+system.cpu.num_refs 16793874 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
-system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes.
-system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes.
-system.disk0.dma_write_txs 412 # Number of DMA write transactions.
+system.disk0.dma_write_bytes 2702336 # Number of bytes transfered via DMA writes.
+system.disk0.dma_write_full_pages 302 # Number of full page size DMA writes.
+system.disk0.dma_write_txs 408 # Number of DMA write transactions.
system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD).
system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout
index 88e69a41f..b54e58e73 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 5 2006 22:13:02
-M5 started Fri Oct 6 00:24:58 2006
-M5 executing on zizzer.eecs.umich.edu
+M5 compiled Oct 8 2006 21:57:24
+M5 started Sun Oct 8 21:59:05 2006
+M5 executing on zed.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing
-Exiting @ tick 3493777466 because m5_exit instruction encountered
+Exiting @ tick 3651873858 because m5_exit instruction encountered
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini
index 8722c1b67..95cccfbf2 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=AtomicSimpleCPU
children=workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out
index 6ae80aecf..1138f2dbe 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out
@@ -36,6 +36,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.physmem
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt
index 9fdf1d513..bbc6e55b5 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1393697 # Simulator instruction rate (inst/s)
+host_inst_rate 1432213 # Simulator instruction rate (inst/s)
host_mem_usage 147652 # Number of bytes of host memory used
-host_seconds 0.36 # Real time elapsed on the host
-host_tick_rate 1391995 # Simulator tick rate (ticks/s)
+host_seconds 0.35 # Real time elapsed on the host
+host_tick_rate 1430432 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 500000 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout
index 207a0046c..de2559c1c 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout
@@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 11:12:49
-M5 started Sat Oct 7 11:13:17 2006
+M5 compiled Oct 8 2006 14:00:39
+M5 started Sun Oct 8 14:00:58 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-atomic tests/run.py quick/20.eio-short/alpha/eio/simple-atomic
Exiting @ tick 499999 because a thread reached the max instruction count
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini
index f4bdc8171..72ea32994 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini
@@ -56,6 +56,7 @@ physmem=system.physmem
type=TimingSimpleCPU
children=dcache icache l2cache toL2Bus workload
clock=1
+cpu_id=0
defer_registration=false
function_trace=false
function_trace_start=0
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out
index 71a6d33c4..14eb07351 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out
@@ -75,6 +75,7 @@ max_loads_all_threads=0
progress_interval=0
mem=system.cpu.dcache
system=system
+cpu_id=0
workload=system.cpu.workload
clock=1
defer_registration=false
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt
index f8d2c4ea7..ebc70e1f0 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 618043 # Simulator instruction rate (inst/s)
-host_mem_usage 159232 # Number of bytes of host memory used
+host_inst_rate 620088 # Simulator instruction rate (inst/s)
+host_mem_usage 159272 # Number of bytes of host memory used
host_seconds 0.81 # Real time elapsed on the host
-host_tick_rate 843177 # Simulator tick rate (ticks/s)
+host_tick_rate 845969 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 500000 # Number of instructions simulated
sim_seconds 0.000001 # Number of seconds simulated
@@ -152,41 +152,39 @@ system.cpu.l2cache.ReadReq_misses 857 # nu
system.cpu.l2cache.ReadReq_mshr_miss_latency 857 # number of ReadReq MSHR miss cycles
system.cpu.l2cache.ReadReq_mshr_miss_rate 1 # mshr miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_mshr_misses 857 # number of ReadReq MSHR misses
-system.cpu.l2cache.WriteReq_accesses 165 # number of WriteReq accesses(hits+misses)
-system.cpu.l2cache.WriteReq_hits 165 # number of WriteReq hits
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.192532 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 1022 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 857 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 2 # average overall miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency
-system.cpu.l2cache.demand_hits 165 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits 0 # number of demand (read+write) hits
system.cpu.l2cache.demand_miss_latency 1714 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.838552 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate 1 # miss rate for demand accesses
system.cpu.l2cache.demand_misses 857 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
system.cpu.l2cache.demand_mshr_miss_latency 857 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.838552 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate 1 # mshr miss rate for demand accesses
system.cpu.l2cache.demand_mshr_misses 857 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 1022 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 857 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_hits 165 # number of overall hits
+system.cpu.l2cache.overall_hits 0 # number of overall hits
system.cpu.l2cache.overall_miss_latency 1714 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.838552 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate 1 # miss rate for overall accesses
system.cpu.l2cache.overall_misses 857 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
system.cpu.l2cache.overall_mshr_miss_latency 857 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.838552 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate 1 # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_misses 857 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
@@ -203,7 +201,7 @@ system.cpu.l2cache.replacements 0 # nu
system.cpu.l2cache.sampled_refs 857 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.tagsinuse 560.393094 # Cycle average of tags in use
-system.cpu.l2cache.total_refs 165 # Total number of references to valid blocks.
+system.cpu.l2cache.total_refs 0 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout
index 409068a91..076cf0a5a 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout
@@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 7 2006 12:38:12
-M5 started Sat Oct 7 12:38:52 2006
+M5 compiled Oct 8 2006 20:54:51
+M5 started Sun Oct 8 20:55:29 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-timing tests/run.py quick/20.eio-short/alpha/eio/simple-timing
Exiting @ tick 682488 because a thread reached the max instruction count
diff --git a/tests/quick/50.memtest/test.py b/tests/quick/50.memtest/test.py
new file mode 100644
index 000000000..e894b8fb8
--- /dev/null
+++ b/tests/quick/50.memtest/test.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Ron Dreslinski
+