author     Gabe Black <gblack@eecs.umich.edu>   2011-02-05 00:16:09 -0800
committer  Gabe Black <gblack@eecs.umich.edu>   2011-02-05 00:16:09 -0800
commit     55df9e348c98f25006ac8a95d11bb2cc6b0fdde7 (patch)
tree       c0315fadc19ae7200085f5a0fd7a8dd2ce6d1076 /tests
parent     0aafbe4098dbf41b24b8279bb5e691b702b4383a (diff)
download   gem5-55df9e348c98f25006ac8a95d11bb2cc6b0fdde7.tar.xz
X86: Add o3 regressions in SE mode.
Exclude bzip2 for now. It works, it just takes too long to run.
Diffstat (limited to 'tests')
-rw-r--r--  tests/long/00.gzip/ref/x86/linux/o3-timing/config.ini     519
-rwxr-xr-x  tests/long/00.gzip/ref/x86/linux/o3-timing/simerr           7
-rwxr-xr-x  tests/long/00.gzip/ref/x86/linux/o3-timing/simout        1070
-rw-r--r--  tests/long/00.gzip/ref/x86/linux/o3-timing/stats.txt      444
-rw-r--r--  tests/long/10.mcf/ref/x86/linux/o3-timing/config.ini      519
-rw-r--r--  tests/long/10.mcf/ref/x86/linux/o3-timing/mcf.out         999
-rwxr-xr-x  tests/long/10.mcf/ref/x86/linux/o3-timing/simerr            7
-rwxr-xr-x  tests/long/10.mcf/ref/x86/linux/o3-timing/simout           31
-rw-r--r--  tests/long/10.mcf/ref/x86/linux/o3-timing/stats.txt       442
-rw-r--r--  tests/long/20.parser/ref/x86/linux/o3-timing/config.ini   519
-rwxr-xr-x  tests/long/20.parser/ref/x86/linux/o3-timing/simerr         7
-rwxr-xr-x  tests/long/20.parser/ref/x86/linux/o3-timing/simout        77
-rw-r--r--  tests/long/20.parser/ref/x86/linux/o3-timing/stats.txt    454
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/config.ini    519
-rwxr-xr-x  tests/long/70.twolf/ref/x86/linux/o3-timing/simerr          7
-rwxr-xr-x  tests/long/70.twolf/ref/x86/linux/o3-timing/simout         32
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.out     276
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pin      17
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl1      11
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl2       2
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sav      18
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sv2      19
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/smred.twf      29
-rw-r--r--  tests/long/70.twolf/ref/x86/linux/o3-timing/stats.txt     443
-rw-r--r--  tests/quick/00.hello/ref/x86/linux/o3-timing/config.ini   519
-rwxr-xr-x  tests/quick/00.hello/ref/x86/linux/o3-timing/simerr         7
-rwxr-xr-x  tests/quick/00.hello/ref/x86/linux/o3-timing/simout        16
-rw-r--r--  tests/quick/00.hello/ref/x86/linux/o3-timing/stats.txt    437
28 files changed, 7447 insertions, 0 deletions
diff --git a/tests/long/00.gzip/ref/x86/linux/o3-timing/config.ini b/tests/long/00.gzip/ref/x86/linux/o3-timing/config.ini
new file mode 100644
index 000000000..f7f0c46d4
--- /dev/null
+++ b/tests/long/00.gzip/ref/x86/linux/o3-timing/config.ini
@@ -0,0 +1,519 @@
+[root]
+type=Root
+children=system
+time_sync_enable=false
+time_sync_period=100000000000
+time_sync_spin_threshold=100000000
+
+[system]
+type=System
+children=cpu membus physmem
+mem_mode=atomic
+physmem=system.physmem
+
+[system.cpu]
+type=DerivO3CPU
+children=dcache dtb fuPool icache itb l2cache toL2Bus tracer workload
+BTBEntries=4096
+BTBTagSize=16
+LFSTSize=1024
+LQEntries=32
+RASSize=16
+SQEntries=32
+SSITSize=1024
+activity=0
+backComSize=5
+cachePorts=200
+checker=Null
+choiceCtrBits=2
+choicePredictorSize=8192
+clock=500
+commitToDecodeDelay=1
+commitToFetchDelay=1
+commitToIEWDelay=1
+commitToRenameDelay=1
+commitWidth=8
+cpu_id=0
+decodeToFetchDelay=1
+decodeToRenameDelay=1
+decodeWidth=8
+defer_registration=false
+dispatchWidth=8
+do_checkpoint_insts=true
+do_statistics_insts=true
+dtb=system.cpu.dtb
+fetchToDecodeDelay=1
+fetchTrapLatency=1
+fetchWidth=8
+forwardComSize=5
+fuPool=system.cpu.fuPool
+function_trace=false
+function_trace_start=0
+globalCtrBits=2
+globalHistoryBits=13
+globalPredictorSize=8192
+iewToCommitDelay=1
+iewToDecodeDelay=1
+iewToFetchDelay=1
+iewToRenameDelay=1
+instShiftAmt=2
+issueToExecuteDelay=1
+issueWidth=8
+itb=system.cpu.itb
+localCtrBits=2
+localHistoryBits=11
+localHistoryTableSize=2048
+localPredictorSize=2048
+max_insts_all_threads=0
+max_insts_any_thread=0
+max_loads_all_threads=0
+max_loads_any_thread=0
+numIQEntries=64
+numPhysFloatRegs=256
+numPhysIntRegs=256
+numROBEntries=192
+numRobs=1
+numThreads=1
+phase=0
+predType=tournament
+progress_interval=0
+renameToDecodeDelay=1
+renameToFetchDelay=1
+renameToIEWDelay=2
+renameToROBDelay=1
+renameWidth=8
+smtCommitPolicy=RoundRobin
+smtFetchPolicy=SingleThread
+smtIQPolicy=Partitioned
+smtIQThreshold=100
+smtLSQPolicy=Partitioned
+smtLSQThreshold=100
+smtNumFetchingThreads=1
+smtROBPolicy=Partitioned
+smtROBThreshold=100
+squashWidth=8
+system=system
+tracer=system.cpu.tracer
+trapLatency=13
+wbDepth=1
+wbWidth=8
+workload=system.cpu.workload
+dcache_port=system.cpu.dcache.cpu_side
+icache_port=system.cpu.icache.cpu_side
+
+[system.cpu.dcache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=262144
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.dcache_port
+mem_side=system.cpu.toL2Bus.port[1]
+
+[system.cpu.dtb]
+type=X86TLB
+size=64
+
+[system.cpu.fuPool]
+type=FUPool
+children=FUList0 FUList1 FUList2 FUList3 FUList4 FUList5 FUList6 FUList7 FUList8
+FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 system.cpu.fuPool.FUList8
+
+[system.cpu.fuPool.FUList0]
+type=FUDesc
+children=opList
+count=6
+opList=system.cpu.fuPool.FUList0.opList
+
+[system.cpu.fuPool.FUList0.opList]
+type=OpDesc
+issueLat=1
+opClass=IntAlu
+opLat=1
+
+[system.cpu.fuPool.FUList1]
+type=FUDesc
+children=opList0 opList1
+count=2
+opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1
+
+[system.cpu.fuPool.FUList1.opList0]
+type=OpDesc
+issueLat=1
+opClass=IntMult
+opLat=3
+
+[system.cpu.fuPool.FUList1.opList1]
+type=OpDesc
+issueLat=19
+opClass=IntDiv
+opLat=20
+
+[system.cpu.fuPool.FUList2]
+type=FUDesc
+children=opList0 opList1 opList2
+count=4
+opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2
+
+[system.cpu.fuPool.FUList2.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatAdd
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList1]
+type=OpDesc
+issueLat=1
+opClass=FloatCmp
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList2]
+type=OpDesc
+issueLat=1
+opClass=FloatCvt
+opLat=2
+
+[system.cpu.fuPool.FUList3]
+type=FUDesc
+children=opList0 opList1 opList2
+count=2
+opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2
+
+[system.cpu.fuPool.FUList3.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatMult
+opLat=4
+
+[system.cpu.fuPool.FUList3.opList1]
+type=OpDesc
+issueLat=12
+opClass=FloatDiv
+opLat=12
+
+[system.cpu.fuPool.FUList3.opList2]
+type=OpDesc
+issueLat=24
+opClass=FloatSqrt
+opLat=24
+
+[system.cpu.fuPool.FUList4]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList4.opList
+
+[system.cpu.fuPool.FUList4.opList]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList5]
+type=FUDesc
+children=opList00 opList01 opList02 opList03 opList04 opList05 opList06 opList07 opList08 opList09 opList10 opList11 opList12 opList13 opList14 opList15 opList16 opList17 opList18 opList19
+count=4
+opList=system.cpu.fuPool.FUList5.opList00 system.cpu.fuPool.FUList5.opList01 system.cpu.fuPool.FUList5.opList02 system.cpu.fuPool.FUList5.opList03 system.cpu.fuPool.FUList5.opList04 system.cpu.fuPool.FUList5.opList05 system.cpu.fuPool.FUList5.opList06 system.cpu.fuPool.FUList5.opList07 system.cpu.fuPool.FUList5.opList08 system.cpu.fuPool.FUList5.opList09 system.cpu.fuPool.FUList5.opList10 system.cpu.fuPool.FUList5.opList11 system.cpu.fuPool.FUList5.opList12 system.cpu.fuPool.FUList5.opList13 system.cpu.fuPool.FUList5.opList14 system.cpu.fuPool.FUList5.opList15 system.cpu.fuPool.FUList5.opList16 system.cpu.fuPool.FUList5.opList17 system.cpu.fuPool.FUList5.opList18 system.cpu.fuPool.FUList5.opList19
+
+[system.cpu.fuPool.FUList5.opList00]
+type=OpDesc
+issueLat=1
+opClass=SimdAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList01]
+type=OpDesc
+issueLat=1
+opClass=SimdAddAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList02]
+type=OpDesc
+issueLat=1
+opClass=SimdAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList03]
+type=OpDesc
+issueLat=1
+opClass=SimdCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList04]
+type=OpDesc
+issueLat=1
+opClass=SimdCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList05]
+type=OpDesc
+issueLat=1
+opClass=SimdMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList06]
+type=OpDesc
+issueLat=1
+opClass=SimdMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList07]
+type=OpDesc
+issueLat=1
+opClass=SimdMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList08]
+type=OpDesc
+issueLat=1
+opClass=SimdShift
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList09]
+type=OpDesc
+issueLat=1
+opClass=SimdShiftAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList10]
+type=OpDesc
+issueLat=1
+opClass=SimdSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList11]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList12]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList13]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList14]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList15]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatDiv
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList16]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList17]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList18]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList19]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList6]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList6.opList
+
+[system.cpu.fuPool.FUList6.opList]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList7]
+type=FUDesc
+children=opList0 opList1
+count=4
+opList=system.cpu.fuPool.FUList7.opList0 system.cpu.fuPool.FUList7.opList1
+
+[system.cpu.fuPool.FUList7.opList0]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList7.opList1]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList8]
+type=FUDesc
+children=opList
+count=1
+opList=system.cpu.fuPool.FUList8.opList
+
+[system.cpu.fuPool.FUList8.opList]
+type=OpDesc
+issueLat=3
+opClass=IprAccess
+opLat=3
+
+[system.cpu.icache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=131072
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.icache_port
+mem_side=system.cpu.toL2Bus.port[0]
+
+[system.cpu.itb]
+type=X86TLB
+size=64
+
+[system.cpu.l2cache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=2097152
+subblock_size=0
+tgts_per_mshr=5
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.toL2Bus.port[2]
+mem_side=system.membus.port[1]
+
+[system.cpu.toL2Bus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
+
+[system.cpu.tracer]
+type=ExeTracer
+
+[system.cpu.workload]
+type=LiveProcess
+cmd=gzip input.log 1
+cwd=build/X86_SE/tests/opt/long/00.gzip/x86/linux/o3-timing
+egid=100
+env=
+errout=cerr
+euid=100
+executable=/dist/m5/cpu2000/binaries/x86/linux/gzip
+gid=100
+input=cin
+max_stack_size=67108864
+output=cout
+pid=100
+ppid=99
+simpoint=0
+system=system
+uid=100
+
+[system.membus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.physmem.port[0] system.cpu.l2cache.mem_side
+
+[system.physmem]
+type=PhysicalMemory
+file=
+latency=30000
+latency_var=0
+null=false
+range=0:134217727
+zero=false
+port=system.membus.port[0]
+
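
Note: the config.ini above is the elaborated parameter dump that m5 writes out at startup, not the script the test was configured with. As a rough orientation only, the following is a minimal sketch of how the same O3 CPU with split L1 caches and a shared L2 could be expressed in an M5/gem5 Python config of this era. It is not the actual tests/configs/o3-timing.py or tests/run.py; the class names, port names, and parameter values are taken from the dump above, while the driver boilerplate and option handling are assumptions.

import m5
from m5.objects import *

# System, memory, and memory bus (values mirror the config.ini dump above:
# 128MB physical memory at 30ns latency, 64-byte-wide buses at 1GHz).
system = System(mem_mode='atomic')
system.physmem = PhysicalMemory(range=AddrRange('128MB'), latency='30ns')
system.membus = Bus(clock='1GHz', width=64)

# Out-of-order CPU; the FU pool, branch predictor, and SMT settings shown in
# the dump are left at their DerivO3CPU defaults here.
system.cpu = DerivO3CPU(clock='500ps', numROBEntries=192,
                        numPhysIntRegs=256, numPhysFloatRegs=256)

# Split L1s and a shared L2 behind a private L2 bus.
system.cpu.icache = BaseCache(size='128kB', assoc=2, block_size=64,
                              latency='1ns', mshrs=10, tgts_per_mshr=20)
system.cpu.dcache = BaseCache(size='256kB', assoc=2, block_size=64,
                              latency='1ns', mshrs=10, tgts_per_mshr=20)
system.cpu.l2cache = BaseCache(size='2MB', assoc=2, block_size=64,
                               latency='1ns', mshrs=10, tgts_per_mshr=5)
system.cpu.toL2Bus = Bus(clock='1GHz', width=64)

# Port bindings, using the pre-refactor port names that appear in the dump.
system.cpu.icache_port = system.cpu.icache.cpu_side
system.cpu.dcache_port = system.cpu.dcache.cpu_side
system.cpu.icache.mem_side = system.cpu.toL2Bus.port
system.cpu.dcache.mem_side = system.cpu.toL2Bus.port
system.cpu.l2cache.cpu_side = system.cpu.toL2Bus.port
system.cpu.l2cache.mem_side = system.membus.port
system.physmem.port = system.membus.port

# SE-mode workload, as recorded under [system.cpu.workload] above.
system.cpu.workload = LiveProcess(
    executable='/dist/m5/cpu2000/binaries/x86/linux/gzip',
    cmd=['gzip', 'input.log', '1'])

# Assumed driver boilerplate; the real regressions are driven by tests/run.py,
# as the command line in the simout reference below shows.
root = Root(system=system)
m5.instantiate()
m5.simulate()
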
diff --git a/tests/long/00.gzip/ref/x86/linux/o3-timing/simerr b/tests/long/00.gzip/ref/x86/linux/o3-timing/simerr
new file mode 100755
index 000000000..94d399eab
--- /dev/null
+++ b/tests/long/00.gzip/ref/x86/linux/o3-timing/simerr
@@ -0,0 +1,7 @@
+warn: Sockets disabled, not accepting gdb connections
+For more information see: http://www.m5sim.org/warn/d946bea6
+warn: instruction 'fnstcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+warn: instruction 'fldcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+hack: be nice to actually delete the event here
diff --git a/tests/long/00.gzip/ref/x86/linux/o3-timing/simout b/tests/long/00.gzip/ref/x86/linux/o3-timing/simout
new file mode 100755
index 000000000..f9fa6a62e
--- /dev/null
+++ b/tests/long/00.gzip/ref/x86/linux/o3-timing/simout
@@ -0,0 +1,1070 @@
+M5 Simulator System
+
+Copyright (c) 2001-2008
+The Regents of The University of Michigan
+All Rights Reserved
+
+
+M5 compiled Jan 31 2011 16:34:44
+M5 revision 1b98eea40540 7883 default qtip tip x86o3regressions.patch
+M5 started Jan 31 2011 16:34:46
+M5 executing on burrito
+command line: build/X86_SE/m5.opt -d build/X86_SE/tests/opt/long/00.gzip/x86/linux/o3-timing -re tests/run.py build/X86_SE/tests/opt/long/00.gzip/x86/linux/o3-timing
+Global frequency set at 1000000000000 ticks per second
+info: Entering event queue @ 0. Starting simulation...
+spec_init
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+Loading Input Data
+Duplicating 262144 bytes
+Duplicating 524288 bytes
+Input data 1048576 bytes in length
+Compressing Input Data, level 1
+Compressed data 108074 bytes in length
+Uncompressing Data
+Uncompressed data 1048576 bytes in length
+Uncompressed data compared correctly
+Compressing Input Data, level 3
+Compressed data 97831 bytes in length
+Uncompressing Data
+Uncompressed data 1048576 bytes in length
+Uncompressed data compared correctly
+Compressing Input Data, level 5
+Compressed data 83382 bytes in length
+Uncompressing Data
+Uncompressed data 1048576 bytes in length
+Uncompressed data compared correctly
+Compressing Input Data, level 7
+Compressed data 76606 bytes in length
+Uncompressing Data
+Uncompressed data 1048576 bytes in length
+Uncompressed data compared correctly
+Compressing Input Data, level 9
+Compressed data 73189 bytes in length
+Uncompressing Data
+Uncompressed data 1048576 bytes in length
+Uncompressed data compared correctly
+Tested 1MB buffer: OK!
+Exiting @ tick 772390499500 because target called exit()
diff --git a/tests/long/00.gzip/ref/x86/linux/o3-timing/stats.txt b/tests/long/00.gzip/ref/x86/linux/o3-timing/stats.txt
new file mode 100644
index 000000000..6441cfabc
--- /dev/null
+++ b/tests/long/00.gzip/ref/x86/linux/o3-timing/stats.txt
@@ -0,0 +1,444 @@
+
+---------- Begin Simulation Statistics ----------
+host_inst_rate 136188 # Simulator instruction rate (inst/s)
+host_mem_usage 231788 # Number of bytes of host memory used
+host_seconds 11906.26 # Real time elapsed on the host
+host_tick_rate 64872637 # Simulator tick rate (ticks/s)
+sim_freq 1000000000000 # Frequency of simulated ticks
+sim_insts 1621493982 # Number of instructions simulated
+sim_seconds 0.772390 # Number of seconds simulated
+sim_ticks 772390499500 # Number of ticks simulated
+system.cpu.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly).
+system.cpu.BPredUnit.BTBHits 126254885 # Number of BTB hits
+system.cpu.BPredUnit.BTBLookups 126894033 # Number of BTB lookups
+system.cpu.BPredUnit.RASInCorrect 0 # Number of incorrect RAS predictions.
+system.cpu.BPredUnit.condIncorrect 5933287 # Number of conditional branches incorrect
+system.cpu.BPredUnit.condPredicted 126894073 # Number of conditional branches predicted
+system.cpu.BPredUnit.lookups 126894073 # Number of BP lookups
+system.cpu.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
+system.cpu.commit.COM:branches 107161579 # Number of branches committed
+system.cpu.commit.COM:bw_lim_events 3710402 # number of cycles where commit BW limit reached
+system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits
+system.cpu.commit.COM:committed_per_cycle::samples 1511501895 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::mean 1.072770 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::stdev 1.173458 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::underflows 0 0.00% 0.00% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::0 505879323 33.47% 33.47% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::1 677452709 44.82% 78.29% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::2 153213861 10.14% 88.43% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::3 112394621 7.44% 95.86% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::4 32585093 2.16% 98.02% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::5 19016713 1.26% 99.27% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::6 5421676 0.36% 99.63% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::7 1827497 0.12% 99.75% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::8 3710402 0.25% 100.00% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::overflows 0 0.00% 100.00% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::min_value 0 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::max_value 8 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::total 1511501895 # Number of insts committed each cycle
+system.cpu.commit.COM:count 1621493982 # Number of instructions committed
+system.cpu.commit.COM:loads 419042125 # Number of loads committed
+system.cpu.commit.COM:membars 0 # Number of memory barriers committed
+system.cpu.commit.COM:refs 607228182 # Number of memory references committed
+system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
+system.cpu.commit.branchMispredicts 5933318 # The number of times a branch was mispredicted
+system.cpu.commit.commitCommittedInsts 1621493982 # The number of committed instructions
+system.cpu.commit.commitNonSpecStalls 50 # The number of times commit has been forced to stall to communicate backwards
+system.cpu.commit.commitSquashedInsts 227874068 # The number of squashed insts skipped by commit
+system.cpu.committedInsts 1621493982 # Number of Instructions Simulated
+system.cpu.committedInsts_total 1621493982 # Number of Instructions Simulated
+system.cpu.cpi 0.952690 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 0.952690 # CPI: Total CPI of All Threads
+system.cpu.dcache.ReadReq_accesses 326327666 # number of ReadReq accesses(hits+misses)
+system.cpu.dcache.ReadReq_avg_miss_latency 10363.748203 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 7391.735933 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 326125265 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 2097633000 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.000620 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 202401 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_hits 1725 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 1483344000 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.000615 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_misses 200676 # number of ReadReq MSHR misses
+system.cpu.dcache.WriteReq_accesses 188186057 # number of WriteReq accesses(hits+misses)
+system.cpu.dcache.WriteReq_avg_miss_latency 19667.198248 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency 10021.451346 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_hits 186945733 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 24393698000 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.006591 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 1240324 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_hits 994745 # number of WriteReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_miss_latency 2461058000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.001305 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 245579 # number of WriteReq MSHR misses
+system.cpu.dcache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles::no_targets 15789.833755 # average number of cycles each access was blocked
+system.cpu.dcache.avg_refs 1149.728625 # Average number of references to valid blocks.
+system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked::no_targets 29234 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_targets 461600000 # number of cycles access was blocked
+system.cpu.dcache.cache_copies 0 # number of cache copies performed
+system.cpu.dcache.demand_accesses 514513723 # number of demand (read+write) accesses
+system.cpu.dcache.demand_avg_miss_latency 18362.010085 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 8838.897043 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 513070998 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 26491331000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.002804 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 1442725 # number of demand (read+write) misses
+system.cpu.dcache.demand_mshr_hits 996470 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_miss_latency 3944402000 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.000867 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 446255 # number of demand (read+write) MSHR misses
+system.cpu.dcache.fast_writes 0 # number of fast writes performed
+system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.dcache.occ_%::0 0.999781 # Average percentage of cache occupancy
+system.cpu.dcache.occ_blocks::0 4095.101758 # Average occupied blocks per context
+system.cpu.dcache.overall_accesses 514513723 # number of overall (read+write) accesses
+system.cpu.dcache.overall_avg_miss_latency 18362.010085 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 8838.897043 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_hits 513070998 # number of overall hits
+system.cpu.dcache.overall_miss_latency 26491331000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.002804 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 1442725 # number of overall misses
+system.cpu.dcache.overall_mshr_hits 996470 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_miss_latency 3944402000 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.000867 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 446255 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.dcache.replacements 442158 # number of replacements
+system.cpu.dcache.sampled_refs 446254 # Sample count of references to valid blocks.
+system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
+system.cpu.dcache.tagsinuse 4095.101758 # Cycle average of tags in use
+system.cpu.dcache.total_refs 513070998 # Total number of references to valid blocks.
+system.cpu.dcache.warmup_cycle 331552000 # Cycle when the warmup percentage was hit.
+system.cpu.dcache.writebacks 398281 # number of writebacks
+system.cpu.decode.DECODE:BlockedCycles 176333648 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:DecodedInsts 1886463332 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 320369444 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 981528406 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 33063147 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:UnblockCycles 33270397 # Number of cycles decode is unblocking
+system.cpu.fetch.Branches 126894073 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 119630706 # Number of cache lines fetched
+system.cpu.fetch.Cycles 1056772647 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 432705 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 1026147627 # Number of instructions fetch has processed
+system.cpu.fetch.MiscStallCycles 46 # Number of cycles fetch has spent waiting on interrupts, or bad addresses, or out of MSHRs
+system.cpu.fetch.SquashCycles 9324994 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.082144 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 119630706 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.fetch.predictedBranches 126254885 # Number of branches that fetch has predicted taken
+system.cpu.fetch.rate 0.664267 # Number of inst fetches per cycle
+system.cpu.fetch.rateDist::samples 1544565042 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::mean 1.230490 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::stdev 1.292215 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::underflows 0 0.00% 0.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::0 522111775 33.80% 33.80% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::1 496583342 32.15% 65.95% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::2 273451194 17.70% 83.66% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::3 224891951 14.56% 98.22% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::4 8280335 0.54% 98.75% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::5 1557581 0.10% 98.85% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::6 722 0.00% 98.85% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::7 8665 0.00% 98.86% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::8 17679477 1.14% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::overflows 0 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::min_value 0 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::max_value 8 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::total 1544565042 # Number of instructions fetched each cycle (Total)
+system.cpu.icache.ReadReq_accesses 119630706 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_avg_miss_latency 37171.926007 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 35433.712121 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 119629787 # number of ReadReq hits
+system.cpu.icache.ReadReq_miss_latency 34161000 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_rate 0.000008 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_misses 919 # number of ReadReq misses
+system.cpu.icache.ReadReq_mshr_hits 127 # number of ReadReq MSHR hits
+system.cpu.icache.ReadReq_mshr_miss_latency 28063500 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.000007 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_misses 792 # number of ReadReq MSHR misses
+system.cpu.icache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_refs 151047.710859 # Average number of references to valid blocks.
+system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.cache_copies 0 # number of cache copies performed
+system.cpu.icache.demand_accesses 119630706 # number of demand (read+write) accesses
+system.cpu.icache.demand_avg_miss_latency 37171.926007 # average overall miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency 35433.712121 # average overall mshr miss latency
+system.cpu.icache.demand_hits 119629787 # number of demand (read+write) hits
+system.cpu.icache.demand_miss_latency 34161000 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_rate 0.000008 # miss rate for demand accesses
+system.cpu.icache.demand_misses 919 # number of demand (read+write) misses
+system.cpu.icache.demand_mshr_hits 127 # number of demand (read+write) MSHR hits
+system.cpu.icache.demand_mshr_miss_latency 28063500 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_rate 0.000007 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_misses 792 # number of demand (read+write) MSHR misses
+system.cpu.icache.fast_writes 0 # number of fast writes performed
+system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.icache.occ_%::0 0.352078 # Average percentage of cache occupancy
+system.cpu.icache.occ_blocks::0 721.055018 # Average occupied blocks per context
+system.cpu.icache.overall_accesses 119630706 # number of overall (read+write) accesses
+system.cpu.icache.overall_avg_miss_latency 37171.926007 # average overall miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 35433.712121 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.icache.overall_hits 119629787 # number of overall hits
+system.cpu.icache.overall_miss_latency 34161000 # number of overall miss cycles
+system.cpu.icache.overall_miss_rate 0.000008 # miss rate for overall accesses
+system.cpu.icache.overall_misses 919 # number of overall misses
+system.cpu.icache.overall_mshr_hits 127 # number of overall MSHR hits
+system.cpu.icache.overall_mshr_miss_latency 28063500 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_rate 0.000007 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_misses 792 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.icache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.icache.replacements 4 # number of replacements
+system.cpu.icache.sampled_refs 792 # Sample count of references to valid blocks.
+system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
+system.cpu.icache.tagsinuse 721.055018 # Cycle average of tags in use
+system.cpu.icache.total_refs 119629787 # Total number of references to valid blocks.
+system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.icache.writebacks 0 # number of writebacks
+system.cpu.idleCycles 215958 # Total number of cycles that the CPU has spent unscheduled due to idling
+system.cpu.iew.EXEC:branches 108586362 # Number of branches executed
+system.cpu.iew.EXEC:nop 0 # number of nop insts executed
+system.cpu.iew.EXEC:rate 1.090888 # Inst execution rate
+system.cpu.iew.EXEC:refs 624680336 # number of memory reference insts executed
+system.cpu.iew.EXEC:stores 190102881 # Number of stores executed
+system.cpu.iew.EXEC:swp 0 # number of swp insts executed
+system.cpu.iew.WB:consumers 2506292363 # num instructions consuming a value
+system.cpu.iew.WB:count 1680860111 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.529936 # average fanout of values written-back
+system.cpu.iew.WB:penalized 0 # number of instructions required to write to 'other' IQ
+system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
+system.cpu.iew.WB:producers 1328173821 # num instructions producing a value
+system.cpu.iew.WB:rate 1.088090 # insts written-back per cycle
+system.cpu.iew.WB:sent 1681411195 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 6122546 # Number of branch mispredicts detected at execute
+system.cpu.iew.iewBlockCycles 1253236 # Number of cycles IEW is blocking
+system.cpu.iew.iewDispLoadInsts 492554241 # Number of dispatched load instructions
+system.cpu.iew.iewDispNonSpecInsts 66 # Number of dispatched non-speculative instructions
+system.cpu.iew.iewDispSquashedInsts 3215387 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispStoreInsts 210212351 # Number of dispatched store instructions
+system.cpu.iew.iewDispatchedInsts 1849358863 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 434577455 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 8332046 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 1685183738 # Number of executed instructions
+system.cpu.iew.iewIQFullEvents 18939 # Number of times the IQ has become full, causing a stall
+system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
+system.cpu.iew.iewLSQFullEvents 0 # Number of times the LSQ has become full, causing a stall
+system.cpu.iew.iewSquashCycles 33063147 # Number of cycles IEW is squashing
+system.cpu.iew.iewUnblockCycles 72665 # Number of cycles IEW is unblocking
+system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding
+system.cpu.iew.lsq.thread.0.cacheBlocked 29234 # Number of times an access to memory failed due to the cache being blocked
+system.cpu.iew.lsq.thread.0.forwLoads 108234700 # Number of loads that had data forwarded from stores
+system.cpu.iew.lsq.thread.0.ignoredResponses 16690 # Number of memory responses ignored because the instruction is squashed
+system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.memOrderViolation 3968261 # Number of memory ordering violations
+system.cpu.iew.lsq.thread.0.rescheduledLoads 13 # Number of loads that were rescheduled
+system.cpu.iew.lsq.thread.0.squashedLoads 73512116 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedStores 22026294 # Number of stores squashed
+system.cpu.iew.memOrderViolationEvents 3968261 # Number of memory order violations
+system.cpu.iew.predictedNotTakenIncorrect 2078 # Number of branches that were predicted not taken incorrectly
+system.cpu.iew.predictedTakenIncorrect 6120468 # Number of branches that were predicted taken incorrectly
+system.cpu.ipc 1.049659 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 1.049659 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0::No_OpClass 24157467 1.43% 1.43% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntAlu 1040578234 61.44% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntMult 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntDiv 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatAdd 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCmp 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCvt 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatMult 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatDiv 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatSqrt 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAdd 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAddAcc 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAlu 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCmp 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCvt 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMisc 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMult 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMultAcc 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShift 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShiftAcc 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdSqrt 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAdd 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAlu 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCmp 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCvt 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatDiv 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMisc 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMult 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMultAcc 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatSqrt 0 0.00% 62.87% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemRead 438214492 25.88% 88.75% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemWrite 190565591 11.25% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IprAccess 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::InstPrefetch 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::total 1693515784 # Type of FU issued
+system.cpu.iq.ISSUE:fu_busy_cnt 252744 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_rate 0.000149 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_full::No_OpClass 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntAlu 40 0.02% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAddAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAlu 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMisc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMultAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShift 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShiftAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAlu 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMisc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMultAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemRead 250833 99.24% 99.26% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemWrite 1871 0.74% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IprAccess 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::InstPrefetch 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:issued_per_cycle::samples 1544565042 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::mean 1.096435 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::stdev 0.983023 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::underflows 0 0.00% 0.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::0 454758636 29.44% 29.44% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::1 667103033 43.19% 72.63% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::2 281275831 18.21% 90.84% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::3 105166888 6.81% 97.65% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::4 33264638 2.15% 99.81% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::5 2679834 0.17% 99.98% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::6 311387 0.02% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::7 3979 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::8 816 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::overflows 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::min_value 0 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::max_value 8 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::total 1544565042 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:rate 1.096282 # Inst issue rate
+system.cpu.iq.iqInstsAdded 1849358797 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 1693515784 # Number of instructions issued
+system.cpu.iq.iqNonSpecInstsAdded 66 # Number of non-speculative instructions added to the IQ
+system.cpu.iq.iqSquashedInstsExamined 226765112 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedInstsIssued 1273 # Number of squashed instructions issued
+system.cpu.iq.iqSquashedNonSpecRemoved 16 # Number of squashed non-spec instructions that were removed
+system.cpu.iq.iqSquashedOperandsExamined 584800312 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadExReq_accesses 245580 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_avg_miss_latency 34276.926221 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 31075.745964 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_hits 186864 # number of ReadExReq hits
+system.cpu.l2cache.ReadExReq_miss_latency 2012604000 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_rate 0.239091 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_misses 58716 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 1824643500 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_miss_rate 0.239091 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_misses 58716 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadReq_accesses 201467 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadReq_avg_miss_latency 34133.939861 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 31003.577487 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_hits 169042 # number of ReadReq hits
+system.cpu.l2cache.ReadReq_miss_latency 1106793000 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.160944 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 32425 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 1005291000 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.160944 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 32425 # number of ReadReq MSHR misses
+system.cpu.l2cache.Writeback_accesses 398281 # number of Writeback accesses(hits+misses)
+system.cpu.l2cache.Writeback_hits 398281 # number of Writeback hits
+system.cpu.l2cache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_refs 4.844642 # Average number of references to valid blocks.
+system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.cache_copies 0 # number of cache copies performed
+system.cpu.l2cache.demand_accesses 447047 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_avg_miss_latency 34226.056330 # average overall miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 31050.070769 # average overall mshr miss latency
+system.cpu.l2cache.demand_hits 355906 # number of demand (read+write) hits
+system.cpu.l2cache.demand_miss_latency 3119397000 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.203873 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 91141 # number of demand (read+write) misses
+system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
+system.cpu.l2cache.demand_mshr_miss_latency 2829934500 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.203873 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 91141 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.fast_writes 0 # number of fast writes performed
+system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.l2cache.occ_%::0 0.058867 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_%::1 0.490866 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_blocks::0 1928.938344 # Average occupied blocks per context
+system.cpu.l2cache.occ_blocks::1 16084.711341 # Average occupied blocks per context
+system.cpu.l2cache.overall_accesses 447047 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_avg_miss_latency 34226.056330 # average overall miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 31050.070769 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_hits 355906 # number of overall hits
+system.cpu.l2cache.overall_miss_latency 3119397000 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.203873 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 91141 # number of overall misses
+system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
+system.cpu.l2cache.overall_mshr_miss_latency 2829934500 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.203873 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 91141 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.l2cache.replacements 72873 # number of replacements
+system.cpu.l2cache.sampled_refs 88473 # Sample count of references to valid blocks.
+system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
+system.cpu.l2cache.tagsinuse 18013.649684 # Cycle average of tags in use
+system.cpu.l2cache.total_refs 428620 # Total number of references to valid blocks.
+system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.l2cache.writebacks 58405 # number of writebacks
+system.cpu.memDep0.conflictingLoads 289036318 # Number of conflicting loads.
+system.cpu.memDep0.conflictingStores 113016383 # Number of conflicting stores.
+system.cpu.memDep0.insertedLoads 492554241 # Number of loads inserted to the mem dependence unit.
+system.cpu.memDep0.insertedStores 210212351 # Number of stores inserted to the mem dependence unit.
+system.cpu.numCycles 1544781000 # number of cpu cycles simulated
+system.cpu.rename.RENAME:BlockCycles 55578139 # Number of cycles rename is blocking
+system.cpu.rename.RENAME:CommittedMaps 1617994650 # Number of HB maps that are committed
+system.cpu.rename.RENAME:IQFullEvents 65710608 # Number of times rename has blocked due to IQ full
+system.cpu.rename.RENAME:IdleCycles 361165681 # Number of cycles rename is idle
+system.cpu.rename.RENAME:LSQFullEvents 36822801 # Number of times rename has blocked due to LSQ full
+system.cpu.rename.RENAME:ROBFullEvents 16 # Number of times rename has blocked due to ROB full
+system.cpu.rename.RENAME:RenameLookups 5668050381 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 1874385455 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 1871676358 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 968560202 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 33063147 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:UnblockCycles 126195704 # Number of cycles rename is unblocking
+system.cpu.rename.RENAME:UndoneMaps 253681708 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 2169 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:serializingInsts 67 # count of serializing insts renamed
+system.cpu.rename.RENAME:skidInsts 186996608 # count of insts added to the skid buffer
+system.cpu.rename.RENAME:tempSerializingInsts 71 # count of temporary serializing insts renamed
+system.cpu.timesIdled 45108 # Number of times that the entire CPU went into an idle state and unscheduled itself
+system.cpu.workload.PROG:num_syscalls 48 # Number of system calls
+
+---------- End Simulation Statistics ----------
diff --git a/tests/long/10.mcf/ref/x86/linux/o3-timing/config.ini b/tests/long/10.mcf/ref/x86/linux/o3-timing/config.ini
new file mode 100644
index 000000000..60f53a64a
--- /dev/null
+++ b/tests/long/10.mcf/ref/x86/linux/o3-timing/config.ini
@@ -0,0 +1,519 @@
+[root]
+type=Root
+children=system
+time_sync_enable=false
+time_sync_period=100000000000
+time_sync_spin_threshold=100000000
+
+[system]
+type=System
+children=cpu membus physmem
+mem_mode=atomic
+physmem=system.physmem
+
+[system.cpu]
+type=DerivO3CPU
+children=dcache dtb fuPool icache itb l2cache toL2Bus tracer workload
+BTBEntries=4096
+BTBTagSize=16
+LFSTSize=1024
+LQEntries=32
+RASSize=16
+SQEntries=32
+SSITSize=1024
+activity=0
+backComSize=5
+cachePorts=200
+checker=Null
+choiceCtrBits=2
+choicePredictorSize=8192
+clock=500
+commitToDecodeDelay=1
+commitToFetchDelay=1
+commitToIEWDelay=1
+commitToRenameDelay=1
+commitWidth=8
+cpu_id=0
+decodeToFetchDelay=1
+decodeToRenameDelay=1
+decodeWidth=8
+defer_registration=false
+dispatchWidth=8
+do_checkpoint_insts=true
+do_statistics_insts=true
+dtb=system.cpu.dtb
+fetchToDecodeDelay=1
+fetchTrapLatency=1
+fetchWidth=8
+forwardComSize=5
+fuPool=system.cpu.fuPool
+function_trace=false
+function_trace_start=0
+globalCtrBits=2
+globalHistoryBits=13
+globalPredictorSize=8192
+iewToCommitDelay=1
+iewToDecodeDelay=1
+iewToFetchDelay=1
+iewToRenameDelay=1
+instShiftAmt=2
+issueToExecuteDelay=1
+issueWidth=8
+itb=system.cpu.itb
+localCtrBits=2
+localHistoryBits=11
+localHistoryTableSize=2048
+localPredictorSize=2048
+max_insts_all_threads=0
+max_insts_any_thread=0
+max_loads_all_threads=0
+max_loads_any_thread=0
+numIQEntries=64
+numPhysFloatRegs=256
+numPhysIntRegs=256
+numROBEntries=192
+numRobs=1
+numThreads=1
+phase=0
+predType=tournament
+progress_interval=0
+renameToDecodeDelay=1
+renameToFetchDelay=1
+renameToIEWDelay=2
+renameToROBDelay=1
+renameWidth=8
+smtCommitPolicy=RoundRobin
+smtFetchPolicy=SingleThread
+smtIQPolicy=Partitioned
+smtIQThreshold=100
+smtLSQPolicy=Partitioned
+smtLSQThreshold=100
+smtNumFetchingThreads=1
+smtROBPolicy=Partitioned
+smtROBThreshold=100
+squashWidth=8
+system=system
+tracer=system.cpu.tracer
+trapLatency=13
+wbDepth=1
+wbWidth=8
+workload=system.cpu.workload
+dcache_port=system.cpu.dcache.cpu_side
+icache_port=system.cpu.icache.cpu_side
+
+[system.cpu.dcache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=262144
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.dcache_port
+mem_side=system.cpu.toL2Bus.port[1]
+
+[system.cpu.dtb]
+type=X86TLB
+size=64
+
+[system.cpu.fuPool]
+type=FUPool
+children=FUList0 FUList1 FUList2 FUList3 FUList4 FUList5 FUList6 FUList7 FUList8
+FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 system.cpu.fuPool.FUList8
+
+[system.cpu.fuPool.FUList0]
+type=FUDesc
+children=opList
+count=6
+opList=system.cpu.fuPool.FUList0.opList
+
+[system.cpu.fuPool.FUList0.opList]
+type=OpDesc
+issueLat=1
+opClass=IntAlu
+opLat=1
+
+[system.cpu.fuPool.FUList1]
+type=FUDesc
+children=opList0 opList1
+count=2
+opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1
+
+[system.cpu.fuPool.FUList1.opList0]
+type=OpDesc
+issueLat=1
+opClass=IntMult
+opLat=3
+
+[system.cpu.fuPool.FUList1.opList1]
+type=OpDesc
+issueLat=19
+opClass=IntDiv
+opLat=20
+
+[system.cpu.fuPool.FUList2]
+type=FUDesc
+children=opList0 opList1 opList2
+count=4
+opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2
+
+[system.cpu.fuPool.FUList2.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatAdd
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList1]
+type=OpDesc
+issueLat=1
+opClass=FloatCmp
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList2]
+type=OpDesc
+issueLat=1
+opClass=FloatCvt
+opLat=2
+
+[system.cpu.fuPool.FUList3]
+type=FUDesc
+children=opList0 opList1 opList2
+count=2
+opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2
+
+[system.cpu.fuPool.FUList3.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatMult
+opLat=4
+
+[system.cpu.fuPool.FUList3.opList1]
+type=OpDesc
+issueLat=12
+opClass=FloatDiv
+opLat=12
+
+[system.cpu.fuPool.FUList3.opList2]
+type=OpDesc
+issueLat=24
+opClass=FloatSqrt
+opLat=24
+
+[system.cpu.fuPool.FUList4]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList4.opList
+
+[system.cpu.fuPool.FUList4.opList]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList5]
+type=FUDesc
+children=opList00 opList01 opList02 opList03 opList04 opList05 opList06 opList07 opList08 opList09 opList10 opList11 opList12 opList13 opList14 opList15 opList16 opList17 opList18 opList19
+count=4
+opList=system.cpu.fuPool.FUList5.opList00 system.cpu.fuPool.FUList5.opList01 system.cpu.fuPool.FUList5.opList02 system.cpu.fuPool.FUList5.opList03 system.cpu.fuPool.FUList5.opList04 system.cpu.fuPool.FUList5.opList05 system.cpu.fuPool.FUList5.opList06 system.cpu.fuPool.FUList5.opList07 system.cpu.fuPool.FUList5.opList08 system.cpu.fuPool.FUList5.opList09 system.cpu.fuPool.FUList5.opList10 system.cpu.fuPool.FUList5.opList11 system.cpu.fuPool.FUList5.opList12 system.cpu.fuPool.FUList5.opList13 system.cpu.fuPool.FUList5.opList14 system.cpu.fuPool.FUList5.opList15 system.cpu.fuPool.FUList5.opList16 system.cpu.fuPool.FUList5.opList17 system.cpu.fuPool.FUList5.opList18 system.cpu.fuPool.FUList5.opList19
+
+[system.cpu.fuPool.FUList5.opList00]
+type=OpDesc
+issueLat=1
+opClass=SimdAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList01]
+type=OpDesc
+issueLat=1
+opClass=SimdAddAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList02]
+type=OpDesc
+issueLat=1
+opClass=SimdAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList03]
+type=OpDesc
+issueLat=1
+opClass=SimdCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList04]
+type=OpDesc
+issueLat=1
+opClass=SimdCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList05]
+type=OpDesc
+issueLat=1
+opClass=SimdMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList06]
+type=OpDesc
+issueLat=1
+opClass=SimdMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList07]
+type=OpDesc
+issueLat=1
+opClass=SimdMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList08]
+type=OpDesc
+issueLat=1
+opClass=SimdShift
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList09]
+type=OpDesc
+issueLat=1
+opClass=SimdShiftAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList10]
+type=OpDesc
+issueLat=1
+opClass=SimdSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList11]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList12]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList13]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList14]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList15]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatDiv
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList16]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList17]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList18]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList19]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList6]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList6.opList
+
+[system.cpu.fuPool.FUList6.opList]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList7]
+type=FUDesc
+children=opList0 opList1
+count=4
+opList=system.cpu.fuPool.FUList7.opList0 system.cpu.fuPool.FUList7.opList1
+
+[system.cpu.fuPool.FUList7.opList0]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList7.opList1]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList8]
+type=FUDesc
+children=opList
+count=1
+opList=system.cpu.fuPool.FUList8.opList
+
+[system.cpu.fuPool.FUList8.opList]
+type=OpDesc
+issueLat=3
+opClass=IprAccess
+opLat=3
+
+[system.cpu.icache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=131072
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.icache_port
+mem_side=system.cpu.toL2Bus.port[0]
+
+[system.cpu.itb]
+type=X86TLB
+size=64
+
+[system.cpu.l2cache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=2097152
+subblock_size=0
+tgts_per_mshr=5
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.toL2Bus.port[2]
+mem_side=system.membus.port[1]
+
+[system.cpu.toL2Bus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
+
+[system.cpu.tracer]
+type=ExeTracer
+
+[system.cpu.workload]
+type=LiveProcess
+cmd=mcf mcf.in
+cwd=build/X86_SE/tests/opt/long/10.mcf/x86/linux/o3-timing
+egid=100
+env=
+errout=cerr
+euid=100
+executable=/dist/m5/cpu2000/binaries/x86/linux/mcf
+gid=100
+input=/dist/m5/cpu2000/data/mcf/smred/input/mcf.in
+max_stack_size=67108864
+output=cout
+pid=100
+ppid=99
+simpoint=55300000000
+system=system
+uid=100
+
+[system.membus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.physmem.port[0] system.cpu.l2cache.mem_side
+
+[system.physmem]
+type=PhysicalMemory
+file=
+latency=30000
+latency_var=0
+null=false
+range=0:268435455
+zero=false
+port=system.membus.port[0]
+
diff --git a/tests/long/10.mcf/ref/x86/linux/o3-timing/mcf.out b/tests/long/10.mcf/ref/x86/linux/o3-timing/mcf.out
new file mode 100644
index 000000000..095132477
--- /dev/null
+++ b/tests/long/10.mcf/ref/x86/linux/o3-timing/mcf.out
@@ -0,0 +1,999 @@
+()
+500
+()
+499
+()
+498
+()
+496
+()
+495
+()
+494
+()
+493
+()
+492
+()
+491
+()
+490
+()
+489
+()
+488
+()
+487
+()
+486
+()
+484
+()
+482
+()
+481
+()
+480
+()
+479
+()
+478
+()
+477
+()
+476
+()
+475
+()
+474
+()
+473
+()
+472
+()
+471
+()
+469
+()
+468
+()
+467
+()
+466
+()
+465
+()
+464
+()
+463
+()
+462
+()
+461
+()
+460
+()
+459
+()
+458
+()
+457
+()
+455
+()
+454
+()
+452
+()
+451
+()
+450
+()
+449
+()
+448
+()
+446
+()
+445
+()
+444
+()
+443
+()
+442
+()
+440
+()
+439
+()
+438
+()
+436
+()
+435
+()
+433
+()
+432
+()
+431
+()
+428
+()
+427
+()
+425
+()
+424
+()
+423
+()
+420
+()
+419
+()
+416
+()
+414
+()
+413
+()
+412
+()
+407
+()
+406
+()
+405
+()
+404
+()
+403
+()
+402
+()
+401
+()
+400
+()
+399
+()
+398
+()
+396
+()
+395
+()
+393
+()
+392
+()
+390
+()
+389
+()
+388
+()
+387
+()
+386
+()
+385
+()
+384
+()
+383
+()
+382
+()
+381
+()
+380
+()
+379
+()
+377
+()
+375
+()
+374
+()
+373
+()
+372
+()
+371
+()
+370
+()
+369
+()
+368
+()
+366
+()
+365
+()
+364
+()
+362
+()
+361
+()
+360
+()
+359
+()
+358
+()
+357
+()
+356
+()
+355
+()
+354
+()
+352
+()
+350
+()
+347
+()
+344
+()
+342
+()
+341
+()
+340
+()
+339
+()
+338
+()
+332
+()
+325
+()
+320
+***
+345
+()
+319
+***
+497
+()
+318
+***
+349
+()
+317
+***
+408
+()
+316
+***
+324
+()
+315
+***
+328
+()
+314
+***
+335
+()
+313
+***
+378
+()
+312
+***
+426
+()
+311
+***
+411
+()
+304
+***
+343
+()
+303
+***
+417
+()
+302
+***
+485
+()
+301
+***
+363
+()
+300
+***
+376
+()
+299
+***
+333
+()
+292
+***
+337
+()
+291
+***
+409
+()
+290
+***
+421
+()
+289
+***
+437
+()
+288
+***
+430
+()
+287
+***
+348
+()
+286
+***
+326
+()
+284
+()
+282
+***
+308
+()
+279
+***
+297
+***
+305
+()
+278
+()
+277
+***
+307
+()
+276
+***
+296
+()
+273
+()
+271
+()
+265
+()
+246
+***
+267
+()
+245
+***
+280
+()
+244
+***
+391
+()
+243
+***
+330
+()
+242
+***
+456
+()
+241
+***
+346
+()
+240
+***
+483
+()
+239
+***
+260
+()
+238
+***
+261
+()
+237
+***
+262
+***
+294
+()
+236
+***
+253
+()
+229
+***
+397
+()
+228
+***
+298
+()
+227
+***
+415
+()
+226
+***
+264
+()
+224
+***
+232
+()
+222
+***
+233
+()
+217
+***
+250
+()
+211
+***
+331
+()
+210
+***
+394
+()
+209
+***
+410
+()
+208
+***
+321
+()
+207
+***
+327
+()
+206
+***
+309
+()
+199
+***
+259
+()
+198
+***
+219
+()
+197
+***
+220
+()
+195
+***
+429
+()
+194
+***
+470
+()
+193
+***
+274
+()
+191
+***
+203
+()
+190
+***
+263
+()
+189
+215
+***
+230
+()
+188
+***
+266
+***
+295
+()
+182
+***
+329
+()
+181
+***
+351
+()
+180
+***
+441
+()
+179
+***
+453
+()
+178
+***
+418
+()
+177
+***
+353
+()
+176
+***
+422
+()
+175
+***
+225
+***
+255
+()
+174
+***
+269
+()
+173
+***
+214
+()
+172
+***
+186
+()
+171
+***
+447
+()
+170
+***
+270
+***
+306
+()
+169
+***
+336
+()
+168
+***
+285
+()
+165
+***
+249
+()
+146
+***
+154
+()
+143
+***
+334
+()
+142
+***
+216
+***
+257
+()
+141
+***
+167
+***
+251
+()
+140
+***
+162
+***
+293
+()
+139
+***
+158
+()
+137
+***
+166
+***
+201
+()
+136
+***
+160
+()
+134
+***
+221
+()
+132
+***
+213
+()
+131
+***
+187
+()
+129
+***
+235
+()
+128
+***
+153
+()
+127
+***
+156
+()
+126
+***
+159
+***
+218
+()
+125
+***
+155
+()
+124
+***
+157
+()
+123
+***
+152
+()
+116
+***
+135
+***
+163
+()
+115
+***
+133
+***
+204
+***
+248
+()
+114
+***
+192
+***
+212
+()
+113
+***
+268
+()
+112
+***
+367
+()
+111
+***
+272
+()
+110
+***
+434
+()
+109
+***
+323
+()
+108
+***
+281
+()
+107
+***
+144
+***
+148
+()
+106
+***
+275
+()
+105
+***
+196
+***
+254
+()
+104
+***
+138
+***
+161
+()
+103
+***
+310
+()
+102
+***
+223
+***
+252
+()
+80
+()
+70
+()
+69
+()
+68
+()
+66
+()
+64
+()
+62
+***
+256
+()
+61
+***
+93
+()
+59
+***
+120
+()
+58
+()
+57
+***
+183
+()
+55
+()
+54
+()
+52
+***
+147
+()
+51
+***
+118
+()
+50
+***
+83
+()
+49
+***
+98
+()
+48
+***
+99
+()
+47
+()
+46
+***
+184
+()
+45
+***
+121
+()
+44
+()
+43
+***
+88
+()
+42
+***
+122
+()
+41
+***
+91
+()
+40
+***
+96
+()
+38
+***
+100
+()
+37
+***
+149
+()
+36
+***
+74
+()
+35
+***
+258
+()
+34
+***
+151
+()
+33
+***
+85
+()
+32
+()
+31
+***
+94
+()
+30
+***
+97
+()
+29
+***
+90
+()
+28
+***
+89
+()
+27
+***
+92
+()
+26
+***
+72
+***
+247
+()
+25
+***
+86
+()
+24
+***
+82
+()
+23
+***
+87
+***
+117
+()
+22
+***
+76
+***
+119
+()
+21
+***
+84
+()
+20
+***
+78
+()
+19
+***
+73
+()
+18
+***
+81
+()
+17
+***
+65
+()
+16
+***
+63
+***
+101
+()
+15
+***
+71
+()
+14
+***
+75
+()
+13
+***
+322
+()
+12
+***
+77
+()
+11
+***
+283
+()
+10
+***
+79
+()
+9
+***
+145
+***
+150
+()
+8
+***
+67
+()
+7
+***
+60
+***
+231
+()
+6
+***
+56
+***
+234
+()
+5
+***
+164
+***
+202
+()
+4
+***
+53
+()
+3
+***
+130
+***
+185
+***
+200
+()
+2
+***
+205
+()
+1
+***
+39
+***
+95
diff --git a/tests/long/10.mcf/ref/x86/linux/o3-timing/simerr b/tests/long/10.mcf/ref/x86/linux/o3-timing/simerr
new file mode 100755
index 000000000..94d399eab
--- /dev/null
+++ b/tests/long/10.mcf/ref/x86/linux/o3-timing/simerr
@@ -0,0 +1,7 @@
+warn: Sockets disabled, not accepting gdb connections
+For more information see: http://www.m5sim.org/warn/d946bea6
+warn: instruction 'fnstcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+warn: instruction 'fldcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+hack: be nice to actually delete the event here
diff --git a/tests/long/10.mcf/ref/x86/linux/o3-timing/simout b/tests/long/10.mcf/ref/x86/linux/o3-timing/simout
new file mode 100755
index 000000000..fde487a8e
--- /dev/null
+++ b/tests/long/10.mcf/ref/x86/linux/o3-timing/simout
@@ -0,0 +1,31 @@
+M5 Simulator System
+
+Copyright (c) 2001-2008
+The Regents of The University of Michigan
+All Rights Reserved
+
+
+M5 compiled Jan 31 2011 16:34:44
+M5 revision 1b98eea40540 7883 default qtip tip x86o3regressions.patch
+M5 started Jan 31 2011 16:34:46
+M5 executing on burrito
+command line: build/X86_SE/m5.opt -d build/X86_SE/tests/opt/long/10.mcf/x86/linux/o3-timing -re tests/run.py build/X86_SE/tests/opt/long/10.mcf/x86/linux/o3-timing
+Global frequency set at 1000000000000 ticks per second
+info: Entering event queue @ 0. Starting simulation...
+
+MCF SPEC version 1.6.I
+by Andreas Loebel
+Copyright (c) 1998,1999 ZIB Berlin
+All Rights Reserved.
+
+nodes : 500
+active arcs : 1905
+simplex iterations : 1502
+flow value : 4990014995
+new implicit arcs : 23867
+active arcs : 25772
+simplex iterations : 2663
+flow value : 3080014995
+checksum : 68389
+optimal
+Exiting @ tick 170680631000 because target called exit()
diff --git a/tests/long/10.mcf/ref/x86/linux/o3-timing/stats.txt b/tests/long/10.mcf/ref/x86/linux/o3-timing/stats.txt
new file mode 100644
index 000000000..8ba88fe5a
--- /dev/null
+++ b/tests/long/10.mcf/ref/x86/linux/o3-timing/stats.txt
@@ -0,0 +1,442 @@
+
+---------- Begin Simulation Statistics ----------
+host_inst_rate 76828 # Simulator instruction rate (inst/s)
+host_mem_usage 366252 # Number of bytes of host memory used
+host_seconds 3621.00 # Real time elapsed on the host
+host_tick_rate 47136339 # Simulator tick rate (ticks/s)
+sim_freq 1000000000000 # Frequency of simulated ticks
+sim_insts 278192519 # Number of instructions simulated
+sim_seconds 0.170681 # Number of seconds simulated
+sim_ticks 170680631000 # Number of ticks simulated
+system.cpu.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly.
+system.cpu.BPredUnit.BTBHits 50810617 # Number of BTB hits
+system.cpu.BPredUnit.BTBLookups 51416767 # Number of BTB lookups
+system.cpu.BPredUnit.RASInCorrect 0 # Number of incorrect RAS predictions.
+system.cpu.BPredUnit.condIncorrect 4328981 # Number of conditional branches incorrect
+system.cpu.BPredUnit.condPredicted 51416803 # Number of conditional branches predicted
+system.cpu.BPredUnit.lookups 51416803 # Number of BP lookups
+system.cpu.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
+system.cpu.commit.COM:branches 29309710 # Number of branches committed
+system.cpu.commit.COM:bw_lim_events 2488105 # number cycles where commit BW limit reached
+system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits
+system.cpu.commit.COM:committed_per_cycle::samples 321793097 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::mean 0.864507 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::stdev 1.425920 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::underflows 0 0.00% 0.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::0 183622049 57.06% 57.06% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::1 75902754 23.59% 80.65% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::2 27223254 8.46% 89.11% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::3 17908154 5.57% 94.67% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::4 5463718 1.70% 96.37% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::5 3630830 1.13% 97.50% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::6 4674698 1.45% 98.95% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::7 879535 0.27% 99.23% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::8 2488105 0.77% 100.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::overflows 0 0.00% 100.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::min_value 0 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::max_value 8 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::total 321793097 # Number of insts commited each cycle
+system.cpu.commit.COM:count 278192519 # Number of instructions committed
+system.cpu.commit.COM:loads 90779388 # Number of loads committed
+system.cpu.commit.COM:membars 0 # Number of memory barriers committed
+system.cpu.commit.COM:refs 122219139 # Number of memory references committed
+system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
+system.cpu.commit.branchMispredicts 4328992 # The number of times a branch was mispredicted
+system.cpu.commit.commitCommittedInsts 278192519 # The number of committed instructions
+system.cpu.commit.commitNonSpecStalls 446 # The number of times commit has been forced to stall to communicate backwards
+system.cpu.commit.commitSquashedInsts 111464423 # The number of squashed insts skipped by commit
+system.cpu.committedInsts 278192519 # Number of Instructions Simulated
+system.cpu.committedInsts_total 278192519 # Number of Instructions Simulated
+system.cpu.cpi 1.227068 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 1.227068 # CPI: Total CPI of All Threads
+system.cpu.dcache.ReadReq_accesses 82779625 # number of ReadReq accesses(hits+misses)
+system.cpu.dcache.ReadReq_avg_miss_latency 5978.815311 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 2941.059048 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 80764514 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 12047976500 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.024343 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 2015111 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_hits 45360 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 5793154000 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.023795 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_misses 1969751 # number of ReadReq MSHR misses
+system.cpu.dcache.WriteReq_accesses 31439751 # number of WriteReq accesses(hits+misses)
+system.cpu.dcache.WriteReq_avg_miss_latency 20696.077989 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency 15440.513442 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_hits 31284703 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 3208885500 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.004932 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 155048 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_hits 48629 # number of WriteReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_miss_latency 1643164000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.003385 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 106419 # number of WriteReq MSHR misses
+system.cpu.dcache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_refs 53.969218 # Average number of references to valid blocks.
+system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.cache_copies 0 # number of cache copies performed
+system.cpu.dcache.demand_accesses 114219376 # number of demand (read+write) accesses
+system.cpu.dcache.demand_avg_miss_latency 7030.296858 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 3581.748123 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 112049217 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 15256862000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.019000 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 2170159 # number of demand (read+write) misses
+system.cpu.dcache.demand_mshr_hits 93989 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_miss_latency 7436318000 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.018177 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 2076170 # number of demand (read+write) MSHR misses
+system.cpu.dcache.fast_writes 0 # number of fast writes performed
+system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.dcache.occ_%::0 0.995143 # Average percentage of cache occupancy
+system.cpu.dcache.occ_blocks::0 4076.104755 # Average occupied blocks per context
+system.cpu.dcache.overall_accesses 114219376 # number of overall (read+write) accesses
+system.cpu.dcache.overall_avg_miss_latency 7030.296858 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 3581.748123 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_hits 112049217 # number of overall hits
+system.cpu.dcache.overall_miss_latency 15256862000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.019000 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 2170159 # number of overall misses
+system.cpu.dcache.overall_mshr_hits 93989 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_miss_latency 7436318000 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.018177 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 2076170 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.dcache.replacements 2072073 # number of replacements
+system.cpu.dcache.sampled_refs 2076169 # Sample count of references to valid blocks.
+system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.dcache.tagsinuse 4076.104755 # Cycle average of tags in use
+system.cpu.dcache.total_refs 112049217 # Total number of references to valid blocks.
+system.cpu.dcache.warmup_cycle 66009760000 # Cycle when the warmup percentage was hit.
+system.cpu.dcache.writebacks 1440063 # number of writebacks
+system.cpu.decode.DECODE:BlockedCycles 922031 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:DecodedInsts 437195268 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 92021485 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 228705655 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 19453848 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:UnblockCycles 143926 # Number of cycles decode is unblocking
+system.cpu.fetch.Branches 51416803 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 39245397 # Number of cache lines fetched
+system.cpu.fetch.Cycles 242939967 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 793923 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 249694241 # Number of instructions fetch has processed
+system.cpu.fetch.MiscStallCycles 16 # Number of cycles fetch has spent waiting on interrupts, or bad addresses, or out of MSHRs
+system.cpu.fetch.SquashCycles 9845420 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.150623 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 39245397 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.fetch.predictedBranches 50810617 # Number of branches that fetch has predicted taken
+system.cpu.fetch.rate 0.731466 # Number of inst fetches per cycle
+system.cpu.fetch.rateDist::samples 341246945 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::mean 1.321737 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::stdev 1.251135 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::underflows 0 0.00% 0.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::0 105340577 30.87% 30.87% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::1 115413940 33.82% 64.69% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::2 47580781 13.94% 78.63% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::3 58732555 17.21% 95.84% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::4 7189604 2.11% 97.95% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::5 6451059 1.89% 99.84% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::6 527277 0.15% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::7 932 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::8 10220 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::overflows 0 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::min_value 0 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::max_value 8 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::total 341246945 # Number of instructions fetched each cycle (Total)
+system.cpu.icache.ReadReq_accesses 39245397 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_avg_miss_latency 37208.490566 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 35316.192560 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 39244337 # number of ReadReq hits
+system.cpu.icache.ReadReq_miss_latency 39441000 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_rate 0.000027 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_misses 1060 # number of ReadReq misses
+system.cpu.icache.ReadReq_mshr_hits 146 # number of ReadReq MSHR hits
+system.cpu.icache.ReadReq_mshr_miss_latency 32279000 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.000023 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_misses 914 # number of ReadReq MSHR misses
+system.cpu.icache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_refs 42936.911379 # Average number of references to valid blocks.
+system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.cache_copies 0 # number of cache copies performed
+system.cpu.icache.demand_accesses 39245397 # number of demand (read+write) accesses
+system.cpu.icache.demand_avg_miss_latency 37208.490566 # average overall miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency 35316.192560 # average overall mshr miss latency
+system.cpu.icache.demand_hits 39244337 # number of demand (read+write) hits
+system.cpu.icache.demand_miss_latency 39441000 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_rate 0.000027 # miss rate for demand accesses
+system.cpu.icache.demand_misses 1060 # number of demand (read+write) misses
+system.cpu.icache.demand_mshr_hits 146 # number of demand (read+write) MSHR hits
+system.cpu.icache.demand_mshr_miss_latency 32279000 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_rate 0.000023 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_misses 914 # number of demand (read+write) MSHR misses
+system.cpu.icache.fast_writes 0 # number of fast writes performed
+system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.icache.occ_%::0 0.360466 # Average percentage of cache occupancy
+system.cpu.icache.occ_blocks::0 738.235227 # Average occupied blocks per context
+system.cpu.icache.overall_accesses 39245397 # number of overall (read+write) accesses
+system.cpu.icache.overall_avg_miss_latency 37208.490566 # average overall miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 35316.192560 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.icache.overall_hits 39244337 # number of overall hits
+system.cpu.icache.overall_miss_latency 39441000 # number of overall miss cycles
+system.cpu.icache.overall_miss_rate 0.000027 # miss rate for overall accesses
+system.cpu.icache.overall_misses 1060 # number of overall misses
+system.cpu.icache.overall_mshr_hits 146 # number of overall MSHR hits
+system.cpu.icache.overall_mshr_miss_latency 32279000 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_rate 0.000023 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_misses 914 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.icache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.icache.replacements 37 # number of replacements
+system.cpu.icache.sampled_refs 914 # Sample count of references to valid blocks.
+system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.icache.tagsinuse 738.235227 # Cycle average of tags in use
+system.cpu.icache.total_refs 39244337 # Total number of references to valid blocks.
+system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.icache.writebacks 0 # number of writebacks
+system.cpu.idleCycles 114318 # Total number of cycles that the CPU has spent unscheduled due to idling
+system.cpu.iew.EXEC:branches 31118985 # Number of branches executed
+system.cpu.iew.EXEC:nop 0 # number of nop insts executed
+system.cpu.iew.EXEC:rate 0.940576 # Inst execution rate
+system.cpu.iew.EXEC:refs 137464023 # number of memory reference insts executed
+system.cpu.iew.EXEC:stores 32172568 # Number of stores executed
+system.cpu.iew.EXEC:swp 0 # number of swp insts executed
+system.cpu.iew.WB:consumers 361852587 # num instructions consuming a value
+system.cpu.iew.WB:count 317781549 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.623035 # average fanout of values written-back
+system.cpu.iew.WB:penalized 0 # number of instrctions required to write to 'other' IQ
+system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
+system.cpu.iew.WB:producers 225446782 # num instructions producing a value
+system.cpu.iew.WB:rate 0.930924 # insts written-back per cycle
+system.cpu.iew.WB:sent 318008427 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 5390321 # Number of branch mispredicts detected at execute
+system.cpu.iew.iewBlockCycles 197365 # Number of cycles IEW is blocking
+system.cpu.iew.iewDispLoadInsts 131280417 # Number of dispatched load instructions
+system.cpu.iew.iewDispNonSpecInsts 455 # Number of dispatched non-speculative instructions
+system.cpu.iew.iewDispSquashedInsts 3671049 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispStoreInsts 41039188 # Number of dispatched store instructions
+system.cpu.iew.iewDispatchedInsts 389592858 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 105291455 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 12266571 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 321076071 # Number of executed instructions
+system.cpu.iew.iewIQFullEvents 2799 # Number of times the IQ has become full, causing a stall
+system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
+system.cpu.iew.iewLSQFullEvents 1704 # Number of times the LSQ has become full, causing a stall
+system.cpu.iew.iewSquashCycles 19453848 # Number of cycles IEW is squashing
+system.cpu.iew.iewUnblockCycles 10507 # Number of cycles IEW is unblocking
+system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding
+system.cpu.iew.lsq.thread.0.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked
+system.cpu.iew.lsq.thread.0.forwLoads 22405068 # Number of loads that had data forwarded from stores
+system.cpu.iew.lsq.thread.0.ignoredResponses 64376 # Number of memory responses ignored because the instruction is squashed
+system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.memOrderViolation 5520980 # Number of memory ordering violations
+system.cpu.iew.lsq.thread.0.rescheduledLoads 2668 # Number of loads that were rescheduled
+system.cpu.iew.lsq.thread.0.squashedLoads 40501029 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedStores 9599437 # Number of stores squashed
+system.cpu.iew.memOrderViolationEvents 5520980 # Number of memory order violations
+system.cpu.iew.predictedNotTakenIncorrect 16897 # Number of branches that were predicted not taken incorrectly
+system.cpu.iew.predictedTakenIncorrect 5373424 # Number of branches that were predicted taken incorrectly
+system.cpu.ipc 0.814950 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 0.814950 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0::No_OpClass 16700 0.01% 0.01% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntAlu 193455065 58.03% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntMult 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntDiv 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatAdd 15 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCmp 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCvt 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatMult 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatDiv 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatSqrt 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAdd 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAddAcc 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAlu 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCmp 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCvt 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMisc 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMult 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMultAcc 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShift 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShiftAcc 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdSqrt 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAdd 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAlu 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCmp 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCvt 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatDiv 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMisc 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMult 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMultAcc 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatSqrt 0 0.00% 58.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemRead 107162338 32.15% 90.19% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemWrite 32708524 9.81% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IprAccess 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::InstPrefetch 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::total 333342642 # Type of FU issued
+system.cpu.iq.ISSUE:fu_busy_cnt 98152 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_rate 0.000294 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_full::No_OpClass 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntAlu 15 0.02% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAddAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAlu 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMisc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMultAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShift 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShiftAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAlu 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMisc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMultAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemRead 97651 99.49% 99.50% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemWrite 486 0.50% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IprAccess 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::InstPrefetch 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:issued_per_cycle::samples 341246945 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::mean 0.976837 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::stdev 1.032280 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::underflows 0 0.00% 0.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::0 143332703 42.00% 42.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::1 98734149 28.93% 70.94% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::2 68142120 19.97% 90.90% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::3 26890607 7.88% 98.78% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::4 3089152 0.91% 99.69% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::5 1054470 0.31% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::6 2951 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::7 576 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::8 217 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::overflows 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::min_value 0 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::max_value 8 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::total 341246945 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:rate 0.976510 # Inst issue rate
+system.cpu.iq.iqInstsAdded 389592403 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 333342642 # Number of instructions issued
+system.cpu.iq.iqNonSpecInstsAdded 455 # Number of non-speculative instructions added to the IQ
+system.cpu.iq.iqSquashedInstsExamined 109882124 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedNonSpecRemoved 9 # Number of squashed non-spec instructions that were removed
+system.cpu.iq.iqSquashedOperandsExamined 237362106 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadExReq_accesses 106419 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_avg_miss_latency 34277.831445 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 31049.336758 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_hits 63976 # number of ReadExReq hits
+system.cpu.l2cache.ReadExReq_miss_latency 1454854000 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_rate 0.398829 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_misses 42443 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 1317827000 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_miss_rate 0.398829 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_misses 42443 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadReq_accesses 1970665 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadReq_avg_miss_latency 34310.495712 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 31007.530164 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_hits 1936270 # number of ReadReq hits
+system.cpu.l2cache.ReadReq_miss_latency 1180109500 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.017453 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 34395 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 1066504000 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.017453 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 34395 # number of ReadReq MSHR misses
+system.cpu.l2cache.Writeback_accesses 1440063 # number of Writeback accesses(hits+misses)
+system.cpu.l2cache.Writeback_hits 1440063 # number of Writeback hits
+system.cpu.l2cache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_refs 42.751383 # Average number of references to valid blocks.
+system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.cache_copies 0 # number of cache copies performed
+system.cpu.l2cache.demand_accesses 2077084 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_avg_miss_latency 34292.452953 # average overall miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 31030.622869 # average overall mshr miss latency
+system.cpu.l2cache.demand_hits 2000246 # number of demand (read+write) hits
+system.cpu.l2cache.demand_miss_latency 2634963500 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.036993 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 76838 # number of demand (read+write) misses
+system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
+system.cpu.l2cache.demand_mshr_miss_latency 2384331000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.036993 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 76838 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.fast_writes 0 # number of fast writes performed
+system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.l2cache.occ_%::0 0.192442 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_%::1 0.349126 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_blocks::0 6305.950681 # Average occupied blocks per context
+system.cpu.l2cache.occ_blocks::1 11440.167306 # Average occupied blocks per context
+system.cpu.l2cache.overall_accesses 2077084 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_avg_miss_latency 34292.452953 # average overall miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 31030.622869 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_hits 2000246 # number of overall hits
+system.cpu.l2cache.overall_miss_latency 2634963500 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.036993 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 76838 # number of overall misses
+system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
+system.cpu.l2cache.overall_mshr_miss_latency 2384331000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.036993 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 76838 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.l2cache.replacements 49392 # number of replacements
+system.cpu.l2cache.sampled_refs 77392 # Sample count of references to valid blocks.
+system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.l2cache.tagsinuse 17746.117987 # Cycle average of tags in use
+system.cpu.l2cache.total_refs 3308615 # Total number of references to valid blocks.
+system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.l2cache.writebacks 29474 # number of writebacks
+system.cpu.memDep0.conflictingLoads 22358679 # Number of conflicting loads.
+system.cpu.memDep0.conflictingStores 3757180 # Number of conflicting stores.
+system.cpu.memDep0.insertedLoads 131280417 # Number of loads inserted to the mem dependence unit.
+system.cpu.memDep0.insertedStores 41039188 # Number of stores inserted to the mem dependence unit.
+system.cpu.numCycles 341361263 # number of cpu cycles simulated
+system.cpu.rename.RENAME:BlockCycles 486743 # Number of cycles rename is blocking
+system.cpu.rename.RENAME:CommittedMaps 248344192 # Number of HB maps that are committed
+system.cpu.rename.RENAME:IQFullEvents 12249 # Number of times rename has blocked due to IQ full
+system.cpu.rename.RENAME:IdleCycles 98511117 # Number of cycles rename is idle
+system.cpu.rename.RENAME:LSQFullEvents 368076 # Number of times rename has blocked due to LSQ full
+system.cpu.rename.RENAME:RenameLookups 1292599643 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 423407319 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 377348250 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 222275258 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 19453848 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:UnblockCycles 514692 # Number of cycles rename is unblocking
+system.cpu.rename.RENAME:UndoneMaps 129004058 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 5287 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:serializingInsts 454 # count of serializing insts renamed
+system.cpu.rename.RENAME:skidInsts 779091 # count of insts added to the skid buffer
+system.cpu.rename.RENAME:tempSerializingInsts 452 # count of temporary serializing insts renamed
+system.cpu.timesIdled 5627 # Number of times that the entire CPU went into an idle state and unscheduled itself
+system.cpu.workload.PROG:num_syscalls 444 # Number of system calls
+
+---------- End Simulation Statistics ----------
diff --git a/tests/long/20.parser/ref/x86/linux/o3-timing/config.ini b/tests/long/20.parser/ref/x86/linux/o3-timing/config.ini
new file mode 100644
index 000000000..aa5254a3b
--- /dev/null
+++ b/tests/long/20.parser/ref/x86/linux/o3-timing/config.ini
@@ -0,0 +1,519 @@
+[root]
+type=Root
+children=system
+time_sync_enable=false
+time_sync_period=100000000000
+time_sync_spin_threshold=100000000
+
+[system]
+type=System
+children=cpu membus physmem
+mem_mode=atomic
+physmem=system.physmem
+
+[system.cpu]
+type=DerivO3CPU
+children=dcache dtb fuPool icache itb l2cache toL2Bus tracer workload
+BTBEntries=4096
+BTBTagSize=16
+LFSTSize=1024
+LQEntries=32
+RASSize=16
+SQEntries=32
+SSITSize=1024
+activity=0
+backComSize=5
+cachePorts=200
+checker=Null
+choiceCtrBits=2
+choicePredictorSize=8192
+clock=500
+commitToDecodeDelay=1
+commitToFetchDelay=1
+commitToIEWDelay=1
+commitToRenameDelay=1
+commitWidth=8
+cpu_id=0
+decodeToFetchDelay=1
+decodeToRenameDelay=1
+decodeWidth=8
+defer_registration=false
+dispatchWidth=8
+do_checkpoint_insts=true
+do_statistics_insts=true
+dtb=system.cpu.dtb
+fetchToDecodeDelay=1
+fetchTrapLatency=1
+fetchWidth=8
+forwardComSize=5
+fuPool=system.cpu.fuPool
+function_trace=false
+function_trace_start=0
+globalCtrBits=2
+globalHistoryBits=13
+globalPredictorSize=8192
+iewToCommitDelay=1
+iewToDecodeDelay=1
+iewToFetchDelay=1
+iewToRenameDelay=1
+instShiftAmt=2
+issueToExecuteDelay=1
+issueWidth=8
+itb=system.cpu.itb
+localCtrBits=2
+localHistoryBits=11
+localHistoryTableSize=2048
+localPredictorSize=2048
+max_insts_all_threads=0
+max_insts_any_thread=0
+max_loads_all_threads=0
+max_loads_any_thread=0
+numIQEntries=64
+numPhysFloatRegs=256
+numPhysIntRegs=256
+numROBEntries=192
+numRobs=1
+numThreads=1
+phase=0
+predType=tournament
+progress_interval=0
+renameToDecodeDelay=1
+renameToFetchDelay=1
+renameToIEWDelay=2
+renameToROBDelay=1
+renameWidth=8
+smtCommitPolicy=RoundRobin
+smtFetchPolicy=SingleThread
+smtIQPolicy=Partitioned
+smtIQThreshold=100
+smtLSQPolicy=Partitioned
+smtLSQThreshold=100
+smtNumFetchingThreads=1
+smtROBPolicy=Partitioned
+smtROBThreshold=100
+squashWidth=8
+system=system
+tracer=system.cpu.tracer
+trapLatency=13
+wbDepth=1
+wbWidth=8
+workload=system.cpu.workload
+dcache_port=system.cpu.dcache.cpu_side
+icache_port=system.cpu.icache.cpu_side
+
+[system.cpu.dcache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=262144
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.dcache_port
+mem_side=system.cpu.toL2Bus.port[1]
+
+[system.cpu.dtb]
+type=X86TLB
+size=64
+
+[system.cpu.fuPool]
+type=FUPool
+children=FUList0 FUList1 FUList2 FUList3 FUList4 FUList5 FUList6 FUList7 FUList8
+FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 system.cpu.fuPool.FUList8
+
+[system.cpu.fuPool.FUList0]
+type=FUDesc
+children=opList
+count=6
+opList=system.cpu.fuPool.FUList0.opList
+
+[system.cpu.fuPool.FUList0.opList]
+type=OpDesc
+issueLat=1
+opClass=IntAlu
+opLat=1
+
+[system.cpu.fuPool.FUList1]
+type=FUDesc
+children=opList0 opList1
+count=2
+opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1
+
+[system.cpu.fuPool.FUList1.opList0]
+type=OpDesc
+issueLat=1
+opClass=IntMult
+opLat=3
+
+[system.cpu.fuPool.FUList1.opList1]
+type=OpDesc
+issueLat=19
+opClass=IntDiv
+opLat=20
+
+[system.cpu.fuPool.FUList2]
+type=FUDesc
+children=opList0 opList1 opList2
+count=4
+opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2
+
+[system.cpu.fuPool.FUList2.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatAdd
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList1]
+type=OpDesc
+issueLat=1
+opClass=FloatCmp
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList2]
+type=OpDesc
+issueLat=1
+opClass=FloatCvt
+opLat=2
+
+[system.cpu.fuPool.FUList3]
+type=FUDesc
+children=opList0 opList1 opList2
+count=2
+opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2
+
+[system.cpu.fuPool.FUList3.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatMult
+opLat=4
+
+[system.cpu.fuPool.FUList3.opList1]
+type=OpDesc
+issueLat=12
+opClass=FloatDiv
+opLat=12
+
+[system.cpu.fuPool.FUList3.opList2]
+type=OpDesc
+issueLat=24
+opClass=FloatSqrt
+opLat=24
+
+[system.cpu.fuPool.FUList4]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList4.opList
+
+[system.cpu.fuPool.FUList4.opList]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList5]
+type=FUDesc
+children=opList00 opList01 opList02 opList03 opList04 opList05 opList06 opList07 opList08 opList09 opList10 opList11 opList12 opList13 opList14 opList15 opList16 opList17 opList18 opList19
+count=4
+opList=system.cpu.fuPool.FUList5.opList00 system.cpu.fuPool.FUList5.opList01 system.cpu.fuPool.FUList5.opList02 system.cpu.fuPool.FUList5.opList03 system.cpu.fuPool.FUList5.opList04 system.cpu.fuPool.FUList5.opList05 system.cpu.fuPool.FUList5.opList06 system.cpu.fuPool.FUList5.opList07 system.cpu.fuPool.FUList5.opList08 system.cpu.fuPool.FUList5.opList09 system.cpu.fuPool.FUList5.opList10 system.cpu.fuPool.FUList5.opList11 system.cpu.fuPool.FUList5.opList12 system.cpu.fuPool.FUList5.opList13 system.cpu.fuPool.FUList5.opList14 system.cpu.fuPool.FUList5.opList15 system.cpu.fuPool.FUList5.opList16 system.cpu.fuPool.FUList5.opList17 system.cpu.fuPool.FUList5.opList18 system.cpu.fuPool.FUList5.opList19
+
+[system.cpu.fuPool.FUList5.opList00]
+type=OpDesc
+issueLat=1
+opClass=SimdAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList01]
+type=OpDesc
+issueLat=1
+opClass=SimdAddAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList02]
+type=OpDesc
+issueLat=1
+opClass=SimdAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList03]
+type=OpDesc
+issueLat=1
+opClass=SimdCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList04]
+type=OpDesc
+issueLat=1
+opClass=SimdCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList05]
+type=OpDesc
+issueLat=1
+opClass=SimdMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList06]
+type=OpDesc
+issueLat=1
+opClass=SimdMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList07]
+type=OpDesc
+issueLat=1
+opClass=SimdMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList08]
+type=OpDesc
+issueLat=1
+opClass=SimdShift
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList09]
+type=OpDesc
+issueLat=1
+opClass=SimdShiftAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList10]
+type=OpDesc
+issueLat=1
+opClass=SimdSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList11]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList12]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList13]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList14]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList15]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatDiv
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList16]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList17]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList18]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList19]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList6]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList6.opList
+
+[system.cpu.fuPool.FUList6.opList]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList7]
+type=FUDesc
+children=opList0 opList1
+count=4
+opList=system.cpu.fuPool.FUList7.opList0 system.cpu.fuPool.FUList7.opList1
+
+[system.cpu.fuPool.FUList7.opList0]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList7.opList1]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList8]
+type=FUDesc
+children=opList
+count=1
+opList=system.cpu.fuPool.FUList8.opList
+
+[system.cpu.fuPool.FUList8.opList]
+type=OpDesc
+issueLat=3
+opClass=IprAccess
+opLat=3
+
+[system.cpu.icache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=131072
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.icache_port
+mem_side=system.cpu.toL2Bus.port[0]
+
+[system.cpu.itb]
+type=X86TLB
+size=64
+
+[system.cpu.l2cache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=2097152
+subblock_size=0
+tgts_per_mshr=5
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.toL2Bus.port[2]
+mem_side=system.membus.port[1]
+
+[system.cpu.toL2Bus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
+
+[system.cpu.tracer]
+type=ExeTracer
+
+[system.cpu.workload]
+type=LiveProcess
+cmd=parser 2.1.dict -batch
+cwd=build/X86_SE/tests/opt/long/20.parser/x86/linux/o3-timing
+egid=100
+env=
+errout=cerr
+euid=100
+executable=/dist/m5/cpu2000/binaries/x86/linux/parser
+gid=100
+input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in
+max_stack_size=67108864
+output=cout
+pid=100
+ppid=99
+simpoint=114600000000
+system=system
+uid=100
+
+[system.membus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.physmem.port[0] system.cpu.l2cache.mem_side
+
+[system.physmem]
+type=PhysicalMemory
+file=
+latency=30000
+latency_var=0
+null=false
+range=0:134217727
+zero=false
+port=system.membus.port[0]
+
diff --git a/tests/long/20.parser/ref/x86/linux/o3-timing/simerr b/tests/long/20.parser/ref/x86/linux/o3-timing/simerr
new file mode 100755
index 000000000..94d399eab
--- /dev/null
+++ b/tests/long/20.parser/ref/x86/linux/o3-timing/simerr
@@ -0,0 +1,7 @@
+warn: Sockets disabled, not accepting gdb connections
+For more information see: http://www.m5sim.org/warn/d946bea6
+warn: instruction 'fnstcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+warn: instruction 'fldcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+hack: be nice to actually delete the event here
diff --git a/tests/long/20.parser/ref/x86/linux/o3-timing/simout b/tests/long/20.parser/ref/x86/linux/o3-timing/simout
new file mode 100755
index 000000000..6e2ddc167
--- /dev/null
+++ b/tests/long/20.parser/ref/x86/linux/o3-timing/simout
@@ -0,0 +1,77 @@
+M5 Simulator System
+
+Copyright (c) 2001-2008
+The Regents of The University of Michigan
+All Rights Reserved
+
+
+M5 compiled Jan 31 2011 16:34:44
+M5 revision 1b98eea40540 7883 default qtip tip x86o3regressions.patch
+M5 started Jan 31 2011 16:34:46
+M5 executing on burrito
+command line: build/X86_SE/m5.opt -d build/X86_SE/tests/opt/long/20.parser/x86/linux/o3-timing -re tests/run.py build/X86_SE/tests/opt/long/20.parser/x86/linux/o3-timing
+Global frequency set at 1000000000000 ticks per second
+info: Entering event queue @ 0. Starting simulation...
+
+ Reading the dictionary files: *****************************info: Increasing stack size by one page.
+********************
+ 58924 words stored in 3784810 bytes
+
+
+Welcome to the Link Parser -- Version 2.1
+
+ Copyright (C) 1991-1995 Daniel Sleator and Davy Temperley
+
+Processing sentences in batch mode
+
+Echoing of input sentence turned on.
+* as had expected the party to be a success , it was a success
+* do you know where John 's
+* he said that , finding that it was impossible to get work as a waiter , he would work as a janitor
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+* how fast the program is it
+* I am wondering whether to invite to the party
+* I gave him for his birthday it
+* I thought terrible after our discussion
+* I wonder how much money have you earned
+* Janet who is an expert on dogs helped me choose one
+* she interviewed more programmers than was hired
+* such flowers are found chiefly particularly in Europe
+* the dogs some of which were very large ran after the man
+* the man whom I play tennis is here
+* there is going to be an important meeting January
+* to pretend that our program is usable in its current form would be happy
+* we're thinking about going to a movie this theater
+* which dog you said you chased
+- also invited to the meeting were several prominent scientists
+- he ran home so quickly that his mother could hardly believe he had called from school
+- so many people attended that they spilled over into several neighboring fields
+- voting in favor of the bill were 36 Republicans and 4 moderate Democrats
+: Grace may not be possible to fix the problem
+ any program as good as ours should be useful
+ biochemically , I think the experiment has a lot of problems
+ Fred has had five years of experience as a programmer
+ he is looking for another job
+ how did John do it
+ how many more people do you think will come
+ how much more spilled
+ I have more money than John has time
+ I made it clear that I was angry
+ I wonder how John did it
+ I wonder how much more quickly he ran
+ invite John and whoever else you want to invite
+ it is easier to ignore the problem than it is to solve it
+ many who initially supported Thomas later changed their minds
+ neither Mary nor Louise are coming to the party
+ she interviewed more programmers than were hired
+ telling Joe that Sue was coming to the party would create a real problem
+ the man with whom I play tennis is here
+ there is a dog in the park
+ this is not the man we know and love
+ we like to eat at restaurants , usually on weekends
+ what did John say he thought you should do
+ about 2 million people attended
+ the five best costumes got prizes
+No errors!
+Exiting @ tick 817002039000 because target called exit()
diff --git a/tests/long/20.parser/ref/x86/linux/o3-timing/stats.txt b/tests/long/20.parser/ref/x86/linux/o3-timing/stats.txt
new file mode 100644
index 000000000..c8db50488
--- /dev/null
+++ b/tests/long/20.parser/ref/x86/linux/o3-timing/stats.txt
@@ -0,0 +1,454 @@
+
+---------- Begin Simulation Statistics ----------
+host_inst_rate 123365 # Simulator instruction rate (inst/s)
+host_mem_usage 239740 # Number of bytes of host memory used
+host_seconds 12393.99 # Real time elapsed on the host
+host_tick_rate 65919204 # Simulator tick rate (ticks/s)
+sim_freq 1000000000000 # Frequency of simulated ticks
+sim_insts 1528988756 # Number of instructions simulated
+sim_seconds 0.817002 # Number of seconds simulated
+sim_ticks 817002039000 # Number of ticks simulated
+system.cpu.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly.
+system.cpu.BPredUnit.BTBHits 197674461 # Number of BTB hits
+system.cpu.BPredUnit.BTBLookups 215147546 # Number of BTB lookups
+system.cpu.BPredUnit.RASInCorrect 0 # Number of incorrect RAS predictions.
+system.cpu.BPredUnit.condIncorrect 17901021 # Number of conditional branches incorrect
+system.cpu.BPredUnit.condPredicted 215739151 # Number of conditional branches predicted
+system.cpu.BPredUnit.lookups 215739151 # Number of BP lookups
+system.cpu.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
+system.cpu.commit.COM:branches 149758588 # Number of branches committed
+system.cpu.commit.COM:bw_lim_events 8186576 # number cycles where commit BW limit reached
+system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits
+system.cpu.commit.COM:committed_per_cycle::samples 1552269342 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::mean 0.985002 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::stdev 1.301395 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::underflows 0 0.00% 0.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::0 694185983 44.72% 44.72% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::1 509617235 32.83% 77.55% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::2 176087126 11.34% 88.90% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::3 105147186 6.77% 95.67% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::4 31137095 2.01% 97.67% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::5 11224991 0.72% 98.40% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::6 11192282 0.72% 99.12% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::7 5490868 0.35% 99.47% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::8 8186576 0.53% 100.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::overflows 0 0.00% 100.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::min_value 0 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::max_value 8 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::total 1552269342 # Number of insts commited each cycle
+system.cpu.commit.COM:count 1528988756 # Number of instructions committed
+system.cpu.commit.COM:loads 384102160 # Number of loads committed
+system.cpu.commit.COM:membars 0 # Number of memory barriers committed
+system.cpu.commit.COM:refs 533262345 # Number of memory references committed
+system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
+system.cpu.commit.branchMispredicts 17902344 # The number of times a branch was mispredicted
+system.cpu.commit.commitCommittedInsts 1528988756 # The number of committed instructions
+system.cpu.commit.commitNonSpecStalls 553 # The number of times commit has been forced to stall to communicate backwards
+system.cpu.commit.commitSquashedInsts 459109010 # The number of squashed insts skipped by commit
+system.cpu.committedInsts 1528988756 # Number of Instructions Simulated
+system.cpu.committedInsts_total 1528988756 # Number of Instructions Simulated
+system.cpu.cpi 1.068683 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 1.068683 # CPI: Total CPI of All Threads
+system.cpu.dcache.ReadReq_accesses 352008034 # number of ReadReq accesses(hits+misses)
+system.cpu.dcache.ReadReq_avg_miss_latency 14100.976079 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 8499.435037 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 350035037 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 27821183500 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.005605 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 1972997 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_hits 237485 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 14750871500 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.004930 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_misses 1735512 # number of ReadReq MSHR misses
+system.cpu.dcache.WriteReq_accesses 149160201 # number of WriteReq accesses(hits+misses)
+system.cpu.dcache.WriteReq_avg_miss_latency 15942.157352 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency 12645.445755 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_hits 148213244 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 15096537500 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.006349 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 946957 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_hits 159966 # number of WriteReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_miss_latency 9951852000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.005276 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 786991 # number of WriteReq MSHR misses
+system.cpu.dcache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_refs 197.709284 # Average number of references to valid blocks.
+system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.cache_copies 0 # number of cache copies performed
+system.cpu.dcache.demand_accesses 501168235 # number of demand (read+write) accesses
+system.cpu.dcache.demand_avg_miss_latency 14698.081203 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 9792.941178 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 498248281 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 42917721000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.005826 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 2919954 # number of demand (read+write) misses
+system.cpu.dcache.demand_mshr_hits 397451 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_miss_latency 24702723500 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.005033 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 2522503 # number of demand (read+write) MSHR misses
+system.cpu.dcache.fast_writes 0 # number of fast writes performed
+system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.dcache.occ_%::0 0.997749 # Average percentage of cache occupancy
+system.cpu.dcache.occ_blocks::0 4086.780222 # Average occupied blocks per context
+system.cpu.dcache.overall_accesses 501168235 # number of overall (read+write) accesses
+system.cpu.dcache.overall_avg_miss_latency 14698.081203 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 9792.941178 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_hits 498248281 # number of overall hits
+system.cpu.dcache.overall_miss_latency 42917721000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.005826 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 2919954 # number of overall misses
+system.cpu.dcache.overall_mshr_hits 397451 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_miss_latency 24702723500 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.005033 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 2522503 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.dcache.replacements 2516044 # number of replacements
+system.cpu.dcache.sampled_refs 2520140 # Sample count of references to valid blocks.
+system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.dcache.tagsinuse 4086.780222 # Cycle average of tags in use
+system.cpu.dcache.total_refs 498255076 # Total number of references to valid blocks.
+system.cpu.dcache.warmup_cycle 3876881000 # Cycle when the warmup percentage was hit.
+system.cpu.dcache.writebacks 2224034 # number of writebacks
+system.cpu.decode.DECODE:BlockedCycles 25470243 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:DecodedInsts 2119227193 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 403203369 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 1116867689 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 71636028 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:UnblockCycles 6728041 # Number of cycles decode is unblocking
+system.cpu.fetch.Branches 215739151 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 165973622 # Number of cache lines fetched
+system.cpu.fetch.Cycles 1190006834 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 2725815 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 1144873460 # Number of instructions fetch has processed
+system.cpu.fetch.MiscStallCycles 1839 # Number of cycles fetch has spent waiting on interrupts, or bad addresses, or out of MSHRs
+system.cpu.fetch.SquashCycles 29822694 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.132031 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 165973622 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.fetch.predictedBranches 197674461 # Number of branches that fetch has predicted taken
+system.cpu.fetch.rate 0.700655 # Number of inst fetches per cycle
+system.cpu.fetch.rateDist::samples 1623905370 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::mean 1.336094 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::stdev 1.273592 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::underflows 0 0.00% 0.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::0 477535637 29.41% 29.41% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::1 564706157 34.77% 64.18% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::2 259330057 15.97% 80.15% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::3 261180842 16.08% 96.23% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::4 22809127 1.40% 97.64% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::5 31399021 1.93% 99.57% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::6 502829 0.03% 99.60% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::7 12 0.00% 99.60% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::8 6441688 0.40% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::overflows 0 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::min_value 0 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::max_value 8 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::total 1623905370 # Number of instructions fetched each cycle (Total)
+system.cpu.icache.ReadReq_accesses 165973622 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_avg_miss_latency 22741.617211 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 19372.661290 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 165966882 # number of ReadReq hits
+system.cpu.icache.ReadReq_miss_latency 153278500 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_rate 0.000041 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_misses 6740 # number of ReadReq misses
+system.cpu.icache.ReadReq_mshr_hits 540 # number of ReadReq MSHR hits
+system.cpu.icache.ReadReq_mshr_miss_latency 120110500 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.000037 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_misses 6200 # number of ReadReq MSHR misses
+system.cpu.icache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_refs 49795.025203 # Average number of references to valid blocks.
+system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.cache_copies 0 # number of cache copies performed
+system.cpu.icache.demand_accesses 165973622 # number of demand (read+write) accesses
+system.cpu.icache.demand_avg_miss_latency 22741.617211 # average overall miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency 19372.661290 # average overall mshr miss latency
+system.cpu.icache.demand_hits 165966882 # number of demand (read+write) hits
+system.cpu.icache.demand_miss_latency 153278500 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_rate 0.000041 # miss rate for demand accesses
+system.cpu.icache.demand_misses 6740 # number of demand (read+write) misses
+system.cpu.icache.demand_mshr_hits 540 # number of demand (read+write) MSHR hits
+system.cpu.icache.demand_mshr_miss_latency 120110500 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_rate 0.000037 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_misses 6200 # number of demand (read+write) MSHR misses
+system.cpu.icache.fast_writes 0 # number of fast writes performed
+system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.icache.occ_%::0 0.436573 # Average percentage of cache occupancy
+system.cpu.icache.occ_blocks::0 894.100654 # Average occupied blocks per context
+system.cpu.icache.overall_accesses 165973622 # number of overall (read+write) accesses
+system.cpu.icache.overall_avg_miss_latency 22741.617211 # average overall miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 19372.661290 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.icache.overall_hits 165966882 # number of overall hits
+system.cpu.icache.overall_miss_latency 153278500 # number of overall miss cycles
+system.cpu.icache.overall_miss_rate 0.000041 # miss rate for overall accesses
+system.cpu.icache.overall_misses 6740 # number of overall misses
+system.cpu.icache.overall_mshr_hits 540 # number of overall MSHR hits
+system.cpu.icache.overall_mshr_miss_latency 120110500 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_rate 0.000037 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_misses 6200 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.icache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.icache.replacements 1750 # number of replacements
+system.cpu.icache.sampled_refs 3333 # Sample count of references to valid blocks.
+system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.icache.tagsinuse 894.100654 # Cycle average of tags in use
+system.cpu.icache.total_refs 165966819 # Total number of references to valid blocks.
+system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.icache.writebacks 0 # number of writebacks
+system.cpu.idleCycles 10098709 # Total number of cycles that the CPU has spent unscheduled due to idling
+system.cpu.iew.EXEC:branches 158001976 # Number of branches executed
+system.cpu.iew.EXEC:nop 0 # number of nop insts executed
+system.cpu.iew.EXEC:rate 1.044762 # Inst execution rate
+system.cpu.iew.EXEC:refs 586795750 # number of memory reference insts executed
+system.cpu.iew.EXEC:stores 160862585 # Number of stores executed
+system.cpu.iew.EXEC:swp 0 # number of swp insts executed
+system.cpu.iew.WB:consumers 2114014731 # num instructions consuming a value
+system.cpu.iew.WB:count 1694146367 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.583880 # average fanout of values written-back
+system.cpu.iew.WB:penalized 0 # number of instrctions required to write to 'other' IQ
+system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
+system.cpu.iew.WB:producers 1234331323 # num instructions producing a value
+system.cpu.iew.WB:rate 1.036807 # insts written-back per cycle
+system.cpu.iew.WB:sent 1697627373 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 18573506 # Number of branch mispredicts detected at execute
+system.cpu.iew.iewBlockCycles 6103126 # Number of cycles IEW is blocking
+system.cpu.iew.iewDispLoadInsts 508224738 # Number of dispatched load instructions
+system.cpu.iew.iewDispNonSpecInsts 579 # Number of dispatched non-speculative instructions
+system.cpu.iew.iewDispSquashedInsts 12080656 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispStoreInsts 194089353 # Number of dispatched store instructions
+system.cpu.iew.iewDispatchedInsts 1988097398 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 425933165 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 26013466 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 1707144682 # Number of executed instructions
+system.cpu.iew.iewIQFullEvents 381189 # Number of times the IQ has become full, causing a stall
+system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
+system.cpu.iew.iewLSQFullEvents 10588 # Number of times the LSQ has become full, causing a stall
+system.cpu.iew.iewSquashCycles 71636028 # Number of cycles IEW is squashing
+system.cpu.iew.iewUnblockCycles 847228 # Number of cycles IEW is unblocking
+system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding
+system.cpu.iew.lsq.thread.0.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked
+system.cpu.iew.lsq.thread.0.forwLoads 72909425 # Number of loads that had data forwarded from stores
+system.cpu.iew.lsq.thread.0.ignoredResponses 277837 # Number of memory responses ignored because the instruction is squashed
+system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.memOrderViolation 11954619 # Number of memory ordering violations
+system.cpu.iew.lsq.thread.0.rescheduledLoads 832 # Number of loads that were rescheduled
+system.cpu.iew.lsq.thread.0.squashedLoads 124122578 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedStores 44929168 # Number of stores squashed
+system.cpu.iew.memOrderViolationEvents 11954619 # Number of memory order violations
+system.cpu.iew.predictedNotTakenIncorrect 280770 # Number of branches that were predicted not taken incorrectly
+system.cpu.iew.predictedTakenIncorrect 18292736 # Number of branches that were predicted taken incorrectly
+system.cpu.ipc 0.935731 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 0.935731 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0::No_OpClass 1927969 0.11% 0.11% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntAlu 1131725915 65.30% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntMult 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntDiv 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatAdd 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCmp 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCvt 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatMult 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatDiv 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatSqrt 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAdd 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAddAcc 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAlu 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCmp 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCvt 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMisc 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMult 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMultAcc 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShift 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShiftAcc 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdSqrt 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAdd 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAlu 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCmp 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCvt 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatDiv 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMisc 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMult 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMultAcc 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatSqrt 0 0.00% 65.41% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemRead 435582288 25.13% 90.54% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemWrite 163921976 9.46% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IprAccess 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::InstPrefetch 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::total 1733158148 # Type of FU issued
+system.cpu.iq.ISSUE:fu_busy_cnt 1029171 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_rate 0.000594 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_full::No_OpClass 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntAlu 182 0.02% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAddAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAlu 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMisc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMultAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShift 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShiftAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAdd 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAlu 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCmp 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCvt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatDiv 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMisc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMult 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMultAcc 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatSqrt 0 0.00% 0.02% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemRead 466697 45.35% 45.36% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemWrite 562292 54.64% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IprAccess 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::InstPrefetch 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:issued_per_cycle::samples 1623905370 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::mean 1.067278 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::stdev 1.066518 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::underflows 0 0.00% 0.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::0 608633589 37.48% 37.48% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::1 503635145 31.01% 68.49% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::2 353739534 21.78% 90.28% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::3 117719188 7.25% 97.53% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::4 32883027 2.02% 99.55% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::5 6737765 0.41% 99.97% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::6 234496 0.01% 99.98% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::7 322546 0.02% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::8 80 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::overflows 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::min_value 0 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::max_value 8 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::total 1623905370 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:rate 1.060682 # Inst issue rate
+system.cpu.iq.iqInstsAdded 1988096819 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 1733158148 # Number of instructions issued
+system.cpu.iq.iqNonSpecInstsAdded 579 # Number of non-speculative instructions added to the IQ
+system.cpu.iq.iqSquashedInstsExamined 452995728 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedInstsIssued 112 # Number of squashed instructions issued
+system.cpu.iq.iqSquashedNonSpecRemoved 26 # Number of squashed non-spec instructions that were removed
+system.cpu.iq.iqSquashedOperandsExamined 1010995901 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadExReq_accesses 789062 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_avg_miss_latency 34275.179377 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 31001.682665 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_hits 541538 # number of ReadExReq hits
+system.cpu.l2cache.ReadExReq_miss_latency 8483929500 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_rate 0.313694 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_misses 247524 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 7673660500 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_miss_rate 0.313694 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_misses 247524 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadReq_accesses 1734408 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadReq_avg_miss_latency 34153.383782 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 31001.108327 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_hits 1401925 # number of ReadReq hits
+system.cpu.l2cache.ReadReq_miss_latency 11355419500 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.191698 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 332483 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 10307341500 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.191698 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 332483 # number of ReadReq MSHR misses
+system.cpu.l2cache.UpgradeReq_accesses 2863 # number of UpgradeReq accesses(hits+misses)
+system.cpu.l2cache.UpgradeReq_avg_miss_latency 24.346581 # average UpgradeReq miss latency
+system.cpu.l2cache.UpgradeReq_avg_mshr_miss_latency 31002.148228 # average UpgradeReq mshr miss latency
+system.cpu.l2cache.UpgradeReq_hits 70 # number of UpgradeReq hits
+system.cpu.l2cache.UpgradeReq_miss_latency 68000 # number of UpgradeReq miss cycles
+system.cpu.l2cache.UpgradeReq_miss_rate 0.975550 # miss rate for UpgradeReq accesses
+system.cpu.l2cache.UpgradeReq_misses 2793 # number of UpgradeReq misses
+system.cpu.l2cache.UpgradeReq_mshr_miss_latency 86589000 # number of UpgradeReq MSHR miss cycles
+system.cpu.l2cache.UpgradeReq_mshr_miss_rate 0.975550 # mshr miss rate for UpgradeReq accesses
+system.cpu.l2cache.UpgradeReq_mshr_misses 2793 # number of UpgradeReq MSHR misses
+system.cpu.l2cache.Writeback_accesses 2224034 # number of Writeback accesses(hits+misses)
+system.cpu.l2cache.Writeback_hits 2224034 # number of Writeback hits
+system.cpu.l2cache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_refs 5.356881 # Average number of references to valid blocks.
+system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.cache_copies 0 # number of cache copies performed
+system.cpu.l2cache.demand_accesses 2523470 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_avg_miss_latency 34205.361315 # average overall miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 31001.353432 # average overall mshr miss latency
+system.cpu.l2cache.demand_hits 1943463 # number of demand (read+write) hits
+system.cpu.l2cache.demand_miss_latency 19839349000 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.229845 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 580007 # number of demand (read+write) misses
+system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
+system.cpu.l2cache.demand_mshr_miss_latency 17981002000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.229845 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 580007 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.fast_writes 0 # number of fast writes performed
+system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.l2cache.occ_%::0 0.233067 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_%::1 0.421257 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_blocks::0 7637.149597 # Average occupied blocks per context
+system.cpu.l2cache.occ_blocks::1 13803.753842 # Average occupied blocks per context
+system.cpu.l2cache.overall_accesses 2523470 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_avg_miss_latency 34205.361315 # average overall miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 31001.353432 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_hits 1943463 # number of overall hits
+system.cpu.l2cache.overall_miss_latency 19839349000 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.229845 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 580007 # number of overall misses
+system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
+system.cpu.l2cache.overall_mshr_miss_latency 17981002000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.229845 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 580007 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.l2cache.replacements 569254 # number of replacements
+system.cpu.l2cache.sampled_refs 588327 # Sample count of references to valid blocks.
+system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.l2cache.tagsinuse 21440.903439 # Cycle average of tags in use
+system.cpu.l2cache.total_refs 3151598 # Total number of references to valid blocks.
+system.cpu.l2cache.warmup_cycle 469235659000 # Cycle when the warmup percentage was hit.
+system.cpu.l2cache.writebacks 411363 # number of writebacks
+system.cpu.memDep0.conflictingLoads 151128770 # Number of conflicting loads.
+system.cpu.memDep0.conflictingStores 47539398 # Number of conflicting stores.
+system.cpu.memDep0.insertedLoads 508224738 # Number of loads inserted to the mem dependence unit.
+system.cpu.memDep0.insertedStores 194089353 # Number of stores inserted to the mem dependence unit.
+system.cpu.numCycles 1634004079 # number of cpu cycles simulated
+system.cpu.rename.RENAME:BlockCycles 11181498 # Number of cycles rename is blocking
+system.cpu.rename.RENAME:CommittedMaps 1427299027 # Number of HB maps that are committed
+system.cpu.rename.RENAME:IQFullEvents 8162354 # Number of times rename has blocked due to IQ full
+system.cpu.rename.RENAME:IdleCycles 430755417 # Number of cycles rename is idle
+system.cpu.rename.RENAME:LSQFullEvents 1988994 # Number of times rename has blocked due to LSQ full
+system.cpu.rename.RENAME:ROBFullEvents 37 # Number of times rename has blocked due to ROB full
+system.cpu.rename.RENAME:RenameLookups 6064799926 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 2072679155 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 1965930252 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 1095363349 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 71636028 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:UnblockCycles 14962968 # Number of cycles rename is unblocking
+system.cpu.rename.RENAME:UndoneMaps 538631225 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 6110 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:serializingInsts 566 # count of serializing insts renamed
+system.cpu.rename.RENAME:skidInsts 21122292 # count of insts added to the skid buffer
+system.cpu.rename.RENAME:tempSerializingInsts 563 # count of temporary serializing insts renamed
+system.cpu.timesIdled 351337 # Number of times that the entire CPU went into an idle state and unscheduled itself
+system.cpu.workload.PROG:num_syscalls 551 # Number of system calls
+
+---------- End Simulation Statistics ----------
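
The stats.txt reference files above are flat dumps of "name value # description" lines, so a derived figure such as IPC can be cross-checked against the raw counters with a few lines of Python. The sketch below is not part of the test harness; the path is simply the reference file added by this patch, and multi-column distribution entries are skipped for brevity.

    # Sketch only (assumes the reference file path added by this patch):
    # parse an M5 stats.txt dump into a dict and recompute IPC from the
    # raw counters to sanity-check the recorded system.cpu.ipc value.
    def parse_stats(path):
        stats = {}
        with open(path) as f:
            for line in f:
                line = line.split('#', 1)[0].strip()   # drop the description
                if not line or line.startswith('-'):   # skip banners/blanks
                    continue
                name, value = line.split(None, 1)
                try:
                    stats[name] = float(value)
                except ValueError:
                    pass   # skips 'no_value' and multi-column distributions
        return stats

    stats = parse_stats('tests/long/20.parser/ref/x86/linux/o3-timing/stats.txt')
    ipc = stats['system.cpu.committedInsts'] / stats['system.cpu.numCycles']
    print(ipc)   # ~0.9357, matching the recorded system.cpu.ipc above
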
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/config.ini b/tests/long/70.twolf/ref/x86/linux/o3-timing/config.ini
new file mode 100644
index 000000000..64a0645d8
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/config.ini
@@ -0,0 +1,519 @@
+[root]
+type=Root
+children=system
+time_sync_enable=false
+time_sync_period=100000000000
+time_sync_spin_threshold=100000000
+
+[system]
+type=System
+children=cpu membus physmem
+mem_mode=atomic
+physmem=system.physmem
+
+[system.cpu]
+type=DerivO3CPU
+children=dcache dtb fuPool icache itb l2cache toL2Bus tracer workload
+BTBEntries=4096
+BTBTagSize=16
+LFSTSize=1024
+LQEntries=32
+RASSize=16
+SQEntries=32
+SSITSize=1024
+activity=0
+backComSize=5
+cachePorts=200
+checker=Null
+choiceCtrBits=2
+choicePredictorSize=8192
+clock=500
+commitToDecodeDelay=1
+commitToFetchDelay=1
+commitToIEWDelay=1
+commitToRenameDelay=1
+commitWidth=8
+cpu_id=0
+decodeToFetchDelay=1
+decodeToRenameDelay=1
+decodeWidth=8
+defer_registration=false
+dispatchWidth=8
+do_checkpoint_insts=true
+do_statistics_insts=true
+dtb=system.cpu.dtb
+fetchToDecodeDelay=1
+fetchTrapLatency=1
+fetchWidth=8
+forwardComSize=5
+fuPool=system.cpu.fuPool
+function_trace=false
+function_trace_start=0
+globalCtrBits=2
+globalHistoryBits=13
+globalPredictorSize=8192
+iewToCommitDelay=1
+iewToDecodeDelay=1
+iewToFetchDelay=1
+iewToRenameDelay=1
+instShiftAmt=2
+issueToExecuteDelay=1
+issueWidth=8
+itb=system.cpu.itb
+localCtrBits=2
+localHistoryBits=11
+localHistoryTableSize=2048
+localPredictorSize=2048
+max_insts_all_threads=0
+max_insts_any_thread=0
+max_loads_all_threads=0
+max_loads_any_thread=0
+numIQEntries=64
+numPhysFloatRegs=256
+numPhysIntRegs=256
+numROBEntries=192
+numRobs=1
+numThreads=1
+phase=0
+predType=tournament
+progress_interval=0
+renameToDecodeDelay=1
+renameToFetchDelay=1
+renameToIEWDelay=2
+renameToROBDelay=1
+renameWidth=8
+smtCommitPolicy=RoundRobin
+smtFetchPolicy=SingleThread
+smtIQPolicy=Partitioned
+smtIQThreshold=100
+smtLSQPolicy=Partitioned
+smtLSQThreshold=100
+smtNumFetchingThreads=1
+smtROBPolicy=Partitioned
+smtROBThreshold=100
+squashWidth=8
+system=system
+tracer=system.cpu.tracer
+trapLatency=13
+wbDepth=1
+wbWidth=8
+workload=system.cpu.workload
+dcache_port=system.cpu.dcache.cpu_side
+icache_port=system.cpu.icache.cpu_side
+
+[system.cpu.dcache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=262144
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.dcache_port
+mem_side=system.cpu.toL2Bus.port[1]
+
+[system.cpu.dtb]
+type=X86TLB
+size=64
+
+[system.cpu.fuPool]
+type=FUPool
+children=FUList0 FUList1 FUList2 FUList3 FUList4 FUList5 FUList6 FUList7 FUList8
+FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 system.cpu.fuPool.FUList8
+
+[system.cpu.fuPool.FUList0]
+type=FUDesc
+children=opList
+count=6
+opList=system.cpu.fuPool.FUList0.opList
+
+[system.cpu.fuPool.FUList0.opList]
+type=OpDesc
+issueLat=1
+opClass=IntAlu
+opLat=1
+
+[system.cpu.fuPool.FUList1]
+type=FUDesc
+children=opList0 opList1
+count=2
+opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1
+
+[system.cpu.fuPool.FUList1.opList0]
+type=OpDesc
+issueLat=1
+opClass=IntMult
+opLat=3
+
+[system.cpu.fuPool.FUList1.opList1]
+type=OpDesc
+issueLat=19
+opClass=IntDiv
+opLat=20
+
+[system.cpu.fuPool.FUList2]
+type=FUDesc
+children=opList0 opList1 opList2
+count=4
+opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2
+
+[system.cpu.fuPool.FUList2.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatAdd
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList1]
+type=OpDesc
+issueLat=1
+opClass=FloatCmp
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList2]
+type=OpDesc
+issueLat=1
+opClass=FloatCvt
+opLat=2
+
+[system.cpu.fuPool.FUList3]
+type=FUDesc
+children=opList0 opList1 opList2
+count=2
+opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2
+
+[system.cpu.fuPool.FUList3.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatMult
+opLat=4
+
+[system.cpu.fuPool.FUList3.opList1]
+type=OpDesc
+issueLat=12
+opClass=FloatDiv
+opLat=12
+
+[system.cpu.fuPool.FUList3.opList2]
+type=OpDesc
+issueLat=24
+opClass=FloatSqrt
+opLat=24
+
+[system.cpu.fuPool.FUList4]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList4.opList
+
+[system.cpu.fuPool.FUList4.opList]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList5]
+type=FUDesc
+children=opList00 opList01 opList02 opList03 opList04 opList05 opList06 opList07 opList08 opList09 opList10 opList11 opList12 opList13 opList14 opList15 opList16 opList17 opList18 opList19
+count=4
+opList=system.cpu.fuPool.FUList5.opList00 system.cpu.fuPool.FUList5.opList01 system.cpu.fuPool.FUList5.opList02 system.cpu.fuPool.FUList5.opList03 system.cpu.fuPool.FUList5.opList04 system.cpu.fuPool.FUList5.opList05 system.cpu.fuPool.FUList5.opList06 system.cpu.fuPool.FUList5.opList07 system.cpu.fuPool.FUList5.opList08 system.cpu.fuPool.FUList5.opList09 system.cpu.fuPool.FUList5.opList10 system.cpu.fuPool.FUList5.opList11 system.cpu.fuPool.FUList5.opList12 system.cpu.fuPool.FUList5.opList13 system.cpu.fuPool.FUList5.opList14 system.cpu.fuPool.FUList5.opList15 system.cpu.fuPool.FUList5.opList16 system.cpu.fuPool.FUList5.opList17 system.cpu.fuPool.FUList5.opList18 system.cpu.fuPool.FUList5.opList19
+
+[system.cpu.fuPool.FUList5.opList00]
+type=OpDesc
+issueLat=1
+opClass=SimdAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList01]
+type=OpDesc
+issueLat=1
+opClass=SimdAddAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList02]
+type=OpDesc
+issueLat=1
+opClass=SimdAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList03]
+type=OpDesc
+issueLat=1
+opClass=SimdCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList04]
+type=OpDesc
+issueLat=1
+opClass=SimdCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList05]
+type=OpDesc
+issueLat=1
+opClass=SimdMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList06]
+type=OpDesc
+issueLat=1
+opClass=SimdMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList07]
+type=OpDesc
+issueLat=1
+opClass=SimdMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList08]
+type=OpDesc
+issueLat=1
+opClass=SimdShift
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList09]
+type=OpDesc
+issueLat=1
+opClass=SimdShiftAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList10]
+type=OpDesc
+issueLat=1
+opClass=SimdSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList11]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList12]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList13]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList14]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList15]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatDiv
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList16]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList17]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList18]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList19]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList6]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList6.opList
+
+[system.cpu.fuPool.FUList6.opList]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList7]
+type=FUDesc
+children=opList0 opList1
+count=4
+opList=system.cpu.fuPool.FUList7.opList0 system.cpu.fuPool.FUList7.opList1
+
+[system.cpu.fuPool.FUList7.opList0]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList7.opList1]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList8]
+type=FUDesc
+children=opList
+count=1
+opList=system.cpu.fuPool.FUList8.opList
+
+[system.cpu.fuPool.FUList8.opList]
+type=OpDesc
+issueLat=3
+opClass=IprAccess
+opLat=3
+
+[system.cpu.icache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=131072
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.icache_port
+mem_side=system.cpu.toL2Bus.port[0]
+
+[system.cpu.itb]
+type=X86TLB
+size=64
+
+[system.cpu.l2cache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=2097152
+subblock_size=0
+tgts_per_mshr=5
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.toL2Bus.port[2]
+mem_side=system.membus.port[1]
+
+[system.cpu.toL2Bus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
+
+[system.cpu.tracer]
+type=ExeTracer
+
+[system.cpu.workload]
+type=LiveProcess
+cmd=twolf smred
+cwd=build/X86_SE/tests/opt/long/70.twolf/x86/linux/o3-timing
+egid=100
+env=
+errout=cerr
+euid=100
+executable=/dist/m5/cpu2000/binaries/x86/linux/twolf
+gid=100
+input=cin
+max_stack_size=67108864
+output=cout
+pid=100
+ppid=99
+simpoint=0
+system=system
+uid=100
+
+[system.membus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.physmem.port[0] system.cpu.l2cache.mem_side
+
+[system.physmem]
+type=PhysicalMemory
+file=
+latency=30000
+latency_var=0
+null=false
+range=0:134217727
+zero=false
+port=system.membus.port[0]
+
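
Each config.ini above is the flattened dump of the Python configuration the regression ran with: a DerivO3CPU behind split L1 caches (256 kB data, 128 kB instruction, both 2-way) and a shared 2 MB L2 on toL2Bus. Because it is plain INI text, it can be read back with an ordinary INI parser; the sketch below uses only the Python standard library and is not part of the harness -- the path is just the file added above.

    # Sketch only: read the flattened config.ini dump back with the
    # standard-library INI parser to inspect a few parameters.
    # Interpolation is disabled so literal values pass through untouched.
    import configparser

    cfg = configparser.ConfigParser(interpolation=None)
    cfg.read('tests/long/70.twolf/ref/x86/linux/o3-timing/config.ini')

    print(cfg['system.cpu']['numROBEntries'])   # 192
    print(cfg['system.cpu.dcache']['size'])     # 262144 (256 kB, 2-way)
    print(cfg['system.cpu.l2cache']['size'])    # 2097152 (2 MB shared L2)
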
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/simerr b/tests/long/70.twolf/ref/x86/linux/o3-timing/simerr
new file mode 100755
index 000000000..94d399eab
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/simerr
@@ -0,0 +1,7 @@
+warn: Sockets disabled, not accepting gdb connections
+For more information see: http://www.m5sim.org/warn/d946bea6
+warn: instruction 'fnstcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+warn: instruction 'fldcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+hack: be nice to actually delete the event here
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/simout b/tests/long/70.twolf/ref/x86/linux/o3-timing/simout
new file mode 100755
index 000000000..4773ac641
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/simout
@@ -0,0 +1,32 @@
+M5 Simulator System
+
+Copyright (c) 2001-2008
+The Regents of The University of Michigan
+All Rights Reserved
+
+
+M5 compiled Jan 31 2011 16:34:44
+M5 revision 1b98eea40540 7883 default qtip tip x86o3regressions.patch
+M5 started Jan 31 2011 16:34:46
+M5 executing on burrito
+command line: build/X86_SE/m5.opt -d build/X86_SE/tests/opt/long/70.twolf/x86/linux/o3-timing -re tests/run.py build/X86_SE/tests/opt/long/70.twolf/x86/linux/o3-timing
+Couldn't unlink build/X86_SE/tests/opt/long/70.twolf/x86/linux/o3-timing/smred.sav
+Couldn't unlink build/X86_SE/tests/opt/long/70.twolf/x86/linux/o3-timing/smred.sv2
+Global frequency set at 1000000000000 ticks per second
+info: Entering event queue @ 0. Starting simulation...
+
+TimberWolfSC version:v4.3a date:Mon Jan 25 18:50:36 EST 1988
+Standard Cell Placement and Global Routing Program
+Authors: Carl Sechen, Bill Swartz
+ Yale University
+info: Increasing stack size by one page.
+info: Increasing stack size by one page.
+ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
+ 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45
+ 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
+ 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75
+ 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90
+ 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
+106 107 108 109 110 111 112 113 114 115 116 117 118 119 120
+122 123 124 Exiting @ tick 127560542500 because target called exit()
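
The simout above records the exact command line the regression was run with, so the run can be reproduced from a checkout with the same X86_SE build; the sketch below only wraps that recorded invocation and assumes the listed build and output directories already exist.

    # Sketch only: re-run the 70.twolf o3-timing regression with the command
    # line recorded in simout above (no flags beyond what that line shows).
    import subprocess

    outdir = 'build/X86_SE/tests/opt/long/70.twolf/x86/linux/o3-timing'
    subprocess.run(['build/X86_SE/m5.opt', '-d', outdir, '-re',
                    'tests/run.py', outdir], check=True)
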
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.out b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.out
new file mode 100644
index 000000000..00387ae5c
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.out
@@ -0,0 +1,276 @@
+
+TimberWolfSC version:v4.3a date:Mon Jan 25 18:50:36 EST 1988
+Standard Cell Placement and Global Routing Program
+Authors: Carl Sechen, Bill Swartz
+ Yale University
+
+
+NOTE: Restart file .rs2 not used
+
+TimberWolf will perform a global route step
+rowSep: 1.000000
+feedThruWidth: 4
+
+******************
+BLOCK DATA
+block:1 desire:85
+block:2 desire:85
+Total Desired Length: 170
+total cell length: 168
+total block length: 168
+block x-span:84 block y-span:78
+implicit feed thru range: -84
+Using default value of bin.penalty.control:1.000000
+numBins automatically set to:5
+binWidth = average_cell_width + 0 sigma= 17
+average_cell_width is:16
+standard deviation of cell length is:23.6305
+TimberWolfSC starting from the beginning
+
+
+
+THIS IS THE ROUTE COST OF THE ORIGINAL PLACEMENT: 645
+The number of nets with 1 pin is 4
+The number of nets with 2 pin is 9
+The number of nets with 3 pin is 0
+The number of nets with 4 pin is 2
+The number of nets with 5 pin is 0
+The number of nets with 6 pin is 0
+The number of nets with 7 pin is 0
+The number of nets with 8 pin is 0
+The number of nets with 9 pin is 0
+The number of nets with 10 pin or more is 0
+
+New Cost Function: Initial Horizontal Cost:242
+New Cost Function: FEEDS:0 MISSING_ROWS:-46
+
+bdxlen:86 bdylen:78
+l:0 t:78 r:86 b:0
+
+
+
+THIS IS THE ROUTE COST OF THE CURRENT PLACEMENT: 645
+
+
+
+THIS IS THE PENALTY OF THE CURRENT PLACEMENT: 44
+
+The rand generator seed was at utemp() : 1
+
+
+ tempfile[0][0] = 0.982500 tempfile[0][1] = 90.000000
+ tempfile[1][0] = 0.915000 tempfile[1][1] = 20.000000
+ tempfile[2][0] = 0.700000 tempfile[2][1] = 10.000000
+ tempfile[3][0] = 0.100000 tempfile[3][1] = 0.000000
+
+ I T fds Wire Penalty P_lim Epct binC rowC acc s/p early FDs MRs
+ 1 500 0 929 592 160 30.0 1.0 3.0 84.2 34.7 0.0 0 40
+ 2 491 0 876 106 726 0.0 0.8 2.5 80.0 18.5 0.0 0 46
+ 3 482 0 822 273 372 0.0 0.5 1.5 80.8 21.2 0.0 0 46
+ 4 474 0 826 53 247 0.0 0.5 0.9 65.0 21.9 0.0 0 48
+ 5 465 8 987 73 190 0.0 0.5 0.5 50.0 38.3 0.0 0 46
+ 6 457 8 851 67 226 0.0 0.5 0.5 53.8 42.9 0.0 0 52
+ 7 449 8 1067 108 190 0.0 0.5 0.5 46.2 53.8 0.0 0 50
+ 8 441 8 918 106 171 0.0 0.5 0.5 47.1 40.4 0.0 0 48
+ 9 434 8 812 101 197 0.0 0.5 0.5 53.6 21.0 0.0 0 48
+ 10 426 8 1038 121 181 0.0 0.5 0.5 43.6 27.1 0.0 0 48
+ 11 419 8 898 93 187 0.0 0.5 0.5 45.3 47.8 0.0 0 50
+ 12 411 4 857 94 240 0.0 0.5 0.5 62.7 51.6 0.0 0 44
+ 13 404 8 1043 88 185 0.0 0.5 0.5 54.0 52.8 0.0 0 50
+ 14 397 8 767 94 154 0.0 0.5 0.5 33.8 35.0 0.0 0 50
+ 15 390 8 862 89 183 0.0 0.5 0.5 55.6 29.0 0.0 0 46
+ 16 383 4 798 79 173 0.0 0.5 0.5 57.5 35.3 0.0 0 52
+ 17 376 8 827 100 152 0.0 0.5 0.5 35.3 81.8 0.0 0 50
+ 18 370 8 878 101 208 0.0 0.5 0.5 44.7 46.2 0.0 0 48
+ 19 363 4 921 67 167 0.0 0.5 0.5 57.1 34.7 0.0 0 48
+ 20 357 8 933 93 154 0.0 0.5 0.5 46.5 43.6 0.0 0 52
+ 21 351 8 930 89 147 0.0 0.5 0.5 39.4 36.5 0.0 0 52
+ 22 345 8 951 79 142 0.0 0.5 0.5 32.8 51.3 0.0 0 50
+ 23 339 8 1046 87 207 0.0 0.5 0.5 52.8 61.0 0.0 0 48
+ 24 333 4 989 96 185 0.0 0.5 0.5 45.3 43.3 0.0 0 42
+ 25 327 4 577 86 157 0.0 0.5 0.5 31.1 55.3 0.0 0 52
+ 26 321 8 776 97 174 0.0 0.5 0.5 47.9 62.5 0.0 0 52
+ 27 315 8 850 81 188 0.0 0.5 0.5 45.0 55.2 0.0 0 50
+ 28 310 8 898 97 148 0.0 0.5 0.5 43.0 45.8 0.0 0 48
+ 29 304 8 889 65 173 0.0 0.5 0.5 32.5 41.3 0.0 0 50
+ 30 299 8 858 81 153 0.0 0.5 0.5 44.3 29.2 0.0 0 46
+ 31 294 8 871 82 187 0.0 0.5 0.5 45.7 47.7 0.0 0 48
+ 32 289 8 782 109 173 0.0 0.5 0.5 35.2 57.4 0.0 0 48
+ 33 284 8 743 98 189 0.0 0.6 0.5 41.8 64.3 0.0 0 52
+ 34 279 8 943 90 147 0.0 0.5 0.5 38.6 32.8 0.0 0 48
+ 35 274 8 907 57 166 0.0 0.5 0.5 33.6 51.0 0.0 0 48
+ 36 269 8 900 70 148 0.0 0.5 0.5 45.0 41.4 0.0 0 50
+ 37 264 4 875 106 133 0.0 0.5 0.5 31.7 55.3 0.0 0 52
+ 38 260 8 1023 145 149 0.0 0.6 0.5 28.7 65.0 0.0 0 52
+ 39 255 8 801 151 173 0.0 0.9 0.5 41.7 41.2 0.0 0 48
+ 40 251 8 741 104 159 0.0 0.8 0.5 36.2 47.5 0.0 0 48
+ 41 246 8 828 108 149 0.0 0.5 0.5 34.6 50.9 0.0 0 50
+ 42 242 8 947 128 132 0.0 0.7 0.5 34.2 39.0 0.0 0 50
+ 43 238 8 917 101 142 0.0 0.8 0.5 34.4 50.9 0.0 0 48
+ 44 234 8 761 86 129 0.0 0.5 0.5 42.0 36.4 0.0 0 52
+ 45 229 8 979 106 137 0.0 0.5 0.5 29.2 55.3 0.0 0 50
+ 46 225 8 806 74 130 0.0 0.7 0.5 33.1 65.4 0.0 0 52
+ 47 221 8 971 125 114 0.0 0.5 0.5 31.9 45.6 0.0 0 52
+ 48 218 8 869 125 104 0.0 0.9 0.5 30.0 56.0 0.0 0 48
+ 49 214 8 999 153 140 0.0 0.8 0.5 30.4 46.4 0.0 0 52
+ 50 210 8 798 192 139 0.0 1.0 0.5 28.9 50.0 0.0 0 52
+ 51 206 8 860 125 157 0.0 1.2 0.5 31.5 26.9 0.0 0 52
+ 52 203 8 893 186 127 5.9 0.9 0.5 26.4 42.3 0.0 0 46
+ 53 199 8 863 126 141 0.0 1.2 0.5 32.5 44.4 0.0 0 44
+ 54 196 8 788 97 133 0.0 0.9 0.5 37.5 40.0 0.0 0 50
+ 55 192 8 926 119 116 0.0 0.6 0.5 26.1 55.3 0.0 0 52
+ 56 189 8 789 162 107 0.0 0.8 0.5 25.2 40.4 0.0 0 48
+ 57 186 8 878 107 128 0.0 1.1 0.5 23.1 34.0 0.0 0 52
+ 58 182 8 775 105 122 0.0 0.8 0.5 25.5 57.4 0.0 0 50
+ 59 179 8 747 94 129 0.0 0.7 0.5 34.3 37.3 0.0 0 50
+ 60 176 8 845 96 138 0.0 0.6 0.5 28.3 41.7 0.0 0 52
+ 61 173 8 961 121 110 0.0 0.6 0.5 29.0 52.6 0.0 0 48
+ 62 170 4 911 110 109 0.0 0.9 0.5 33.5 33.3 0.0 0 48
+ 63 167 8 656 109 109 0.0 0.8 0.5 21.9 44.7 0.0 0 52
+ 64 164 8 934 117 105 0.0 0.8 0.5 15.5 50.0 0.0 0 52
+ 65 161 8 972 125 95 0.0 0.8 0.5 24.4 50.0 0.0 0 50
+ 66 158 8 894 125 101 0.0 0.9 0.5 27.2 35.9 0.0 0 52
+ 67 155 8 798 146 129 0.0 1.0 0.5 22.8 58.7 0.0 0 52
+ 68 153 8 901 183 92 0.0 1.1 0.5 23.6 34.5 0.0 0 52
+ 69 150 8 977 197 103 0.0 1.4 0.5 23.6 36.8 0.0 0 52
+ 70 147 8 905 262 93 0.0 1.5 0.5 20.3 63.4 0.0 0 52
+ 71 145 8 995 148 122 0.0 1.9 0.5 20.9 35.3 0.0 0 52
+ 72 142 8 934 230 99 0.0 1.6 0.5 20.0 65.9 0.0 0 52
+ 73 140 8 862 173 100 0.0 1.8 0.5 26.8 46.8 0.0 0 52
+ 74 137 8 924 139 90 0.0 1.7 0.5 16.8 42.5 0.0 0 52
+ 75 135 8 888 168 113 0.0 1.6 0.5 22.9 40.4 0.0 0 52
+ 76 133 8 712 212 84 0.0 1.6 0.5 13.4 46.9 0.0 0 52
+ 77 130 8 868 210 91 0.0 1.7 0.5 17.7 51.2 0.0 0 52
+ 78 128 8 952 307 92 0.0 1.9 0.5 19.7 44.9 0.0 0 50
+ 79 126 8 801 157 107 0.0 2.2 0.5 15.8 39.0 0.0 0 52
+ 80 123 8 849 147 93 0.0 2.1 0.5 15.6 51.4 0.0 0 52
+ 81 121 8 799 154 86 0.0 1.9 0.5 12.2 50.0 0.0 0 52
+ 82 119 8 941 213 82 0.0 1.8 0.5 19.5 41.2 0.0 0 50
+ 83 117 8 751 268 94 0.0 2.0 0.5 20.8 42.6 0.0 0 50
+ 84 115 8 828 198 102 0.0 2.2 0.5 15.5 59.5 0.0 0 52
+ 85 113 8 898 266 123 0.0 2.2 0.5 13.2 85.2 0.0 0 52
+ 86 111 8 943 190 93 0.0 2.4 0.5 19.5 45.1 0.0 0 52
+ 87 109 8 864 183 65 0.0 2.4 0.5 14.9 31.8 0.0 0 52
+ 88 107 8 793 203 93 0.0 2.4 0.5 11.8 35.3 0.0 0 52
+ 89 105 8 752 162 74 1.2 2.4 0.5 13.1 21.4 0.0 0 52
+ 90 103 8 801 149 77 0.0 2.3 0.5 9.7 58.3 0.0 0 52
+ 91 102 8 901 230 99 0.0 2.2 0.5 16.0 25.5 0.0 0 52
+ 92 100 8 826 201 87 0.0 2.4 0.5 12.8 45.7 0.0 0 52
+ 93 98 8 810 196 83 0.0 2.5 0.5 14.0 24.4 0.0 0 52
+ 94 96 8 857 209 68 1.0 2.5 0.5 11.5 27.0 5.1 0 52
+ 95 95 8 771 174 91 0.0 2.6 0.5 10.5 26.5 0.0 0 52
+ 96 93 8 955 210 59 0.0 2.6 0.5 10.0 36.7 0.7 0 52
+ 97 91 8 833 206 53 0.0 2.7 0.5 10.2 19.4 1.4 0 52
+ 98 90 8 888 229 86 0.0 2.8 0.5 8.1 36.0 0.0 0 52
+ 99 88 8 794 186 91 1.0 2.9 0.5 8.3 25.0 0.5 0 52
+100 81 8 756 170 72 1.0 2.9 0.5 6.0 23.8 7.0 0 52
+101 74 8 791 176 67 0.0 2.9 0.5 4.4 58.3 4.0 0 52
+102 67 8 813 213 43 0.0 3.0 0.5 7.0 150.0 4.2 0 52
+103 62 8 779 245 39 0.0 3.1 0.5 3.2 16.7 13.0 0 52
+104 56 8 767 303 63 0.0 3.2 0.5 4.1 20.0 0.7 0 52
+105 52 8 757 270 57 0.0 3.5 0.5 6.4 3.7 0.5 0 52
+106 47 8 763 283 41 0.0 3.7 0.5 4.5 0.0 0.0 0 52
+107 43 8 768 283 36 0.0 3.7 0.5 2.9 18.2 3.6 0 52
+108 39 8 804 283 25 0.0 3.7 0.5 3.1 0.0 6.2 0 52
+109 36 8 781 283 24 0.0 3.7 0.5 3.6 6.7 6.7 0 52
+110 33 8 738 298 42 0.0 3.7 0.5 3.3 15.4 3.5 0 52
+111 30 8 761 298 36 0.0 3.7 0.5 2.2 0.0 4.3 0 52
+112 27 8 769 298 37 0.0 3.7 0.5 0.9 0.0 2.2 0 52
+113 25 8 745 298 31 0.0 3.7 0.5 1.5 0.0 6.6 0 52
+114 23 8 753 298 16 0.0 3.7 0.5 1.3 0.0 2.8 0 52
+115 21 8 745 298 11 0.0 3.7 0.5 1.5 0.0 14.0 0 52
+116 19 8 747 298 21 0.0 3.7 0.5 2.1 0.0 5.8 0 52
+117 13 8 737 298 12 0.0 3.7 0.5 1.0 0.0 10.0 0 52
+118 9 8 736 298 4 0.0 3.7 0.5 1.5 0.0 18.5 0 52
+119 0 8 739 298 0 0.0 3.7 0.5 1.8 0.0 18.0 0 52
+120 0 8 732 298 0 0.0 3.7 0.5 1.2 0.0 21.8 0 52
+121 0 8 732 19 -1 0.0 0.0 0.5 0.0 100.0 54.8
+
+Initial Wiring Cost: 645 Final Wiring Cost: 732
+############## Percent Wire Cost Reduction: -13
+
+
+Initial Wire Length: 645 Final Wire Length: 732
+************** Percent Wire Length Reduction: -13
+
+
+Initial Horiz. Wire: 216 Final Horiz. Wire: 147
+$$$$$$$$$$$ Percent H-Wire Length Reduction: 32
+
+
+Initial Vert. Wire: 429 Final Vert. Wire: 585
+@@@@@@@@@@@ Percent V-Wire Length Reduction: -36
+
+Before Feeds are Added:
+BLOCK TOTAL CELL LENGTHS OVER/UNDER TARGET
+ 1 82 -20
+ 2 86 -16
+
+LONGEST Block is:2 Its length is:86
+BLOCK TOTAL CELL LENGTHS OVER/UNDER TARGET
+ 1 86 -16
+ 2 86 -16
+
+LONGEST Block is:1 Its length is:86
+Added: 1 feed-through cells
+
+Removed the cell overlaps --- Will do neighbor interchanges only now
+
+TOTAL INTERCONNECT LENGTH: 994
+OVERLAP PENALTY: 0
+
+initialRowControl: 1.650
+finalRowControl: 0.300
+iter T Wire accept
+ 122 0.001 976 16%
+ 123 0.001 971 0%
+ 124 0.001 971 0%
+Total Feed-Alignment Movement (Pass 1): 0
+Total Feed-Alignment Movement (Pass 2): 0
+Total Feed-Alignment Movement (Pass 3): 0
+Total Feed-Alignment Movement (Pass 4): 0
+Total Feed-Alignment Movement (Pass 5): 0
+Total Feed-Alignment Movement (Pass 6): 0
+Total Feed-Alignment Movement (Pass 7): 0
+Total Feed-Alignment Movement (Pass 8): 0
+
+The rand generator seed was at globroute() : 987654321
+
+
+Total Number of Net Segments: 9
+Number of Switchable Net Segments: 0
+
+Number of channels: 3
+
+
+
+THIS IS THE ORIGINAL NUMBER OF TRACKS: 5
+
+
+no. of accepted flips: 0
+no. of attempted flips: 0
+THIS IS THE NUMBER OF TRACKS: 5
+
+
+
+FINAL NUMBER OF ROUTING TRACKS: 5
+
+MAX OF CHANNEL: 1 is: 0
+MAX OF CHANNEL: 2 is: 4
+MAX OF CHANNEL: 3 is: 1
+FINAL TOTAL INTERCONNECT LENGTH: 978
+FINAL OVERLAP PENALTY: 0 FINAL VALUE OF TOTAL COST IS: 978
+MAX NUMBER OF ATTEMPTED FLIPS PER T: 55
+
+
+cost_scale_factor:3.90616
+
+Number of Feed Thrus: 0
+Number of Implicit Feed Thrus: 0
+
+Statistics:
+Number of Standard Cells: 10
+Number of Pads: 0
+Number of Nets: 15
+Number of Pins: 46
+Usage statistics not available
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pin b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pin
new file mode 100644
index 000000000..62b922e4e
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pin
@@ -0,0 +1,17 @@
+$COUNT_1/$AND2_1/$ND2_1$Z 1 $COUNT_1/$AND2_1/$ND2_1 00#Z 17 52 2 1 0
+$COUNT_1/$AND2_1/$ND2_1$Z 1 ACOUNT_1 00#A 15 26 2 -1 0
+B7 2 $COUNT_1/$FJK3_2 00#K 25 78 3 -1 0
+B7 2 $COUNT_1/$FJK3_2 00#J 23 78 3 -1 0
+B7 3 $COUNT_1/$AND2_2/$ND2_1 00#A 9 26 2 -1 0
+B7 3 ACOUNT_1 01#Z 17 26 2 -1 0
+B6 5 $COUNT_1/$FJK3_1 00#K 25 26 2 -1 0
+B6 5 $COUNT_1/$FJK3_1 00#J 23 26 2 -1 0
+B6 5 $COUNT_1/$AND2_3/$IV_1 01#Z 7 26 2 -1 0
+$COUNT_1/$FJK3_1$Q 6 $COUNT_1/$FJK3_1 01#Q 81 26 2 -1 0
+$COUNT_1/$FJK3_1$Q 6 $COUNT_1/$AND2_1/$ND2_1 00#B 19 52 2 1 0
+$COUNT_1/$FJK3_2$Q 7 $COUNT_1/$FJK3_2 00#Q 81 52 2 1 0
+$COUNT_1/$FJK3_2$Q 7 $COUNT_1/$AND2_2/$ND2_1 01#B 11 26 2 -1 0
+$COUNT_1/$AND2_3/$ND2_1$Z 8 $COUNT_1/$AND2_3/$ND2_1 00#Z 5 52 2 1 0
+$COUNT_1/$AND2_3/$ND2_1$Z 8 $COUNT_1/$AND2_3/$IV_1 00#A 5 26 2 -1 0
+$COUNT_1/$AND2_4/$ND2_1$Z 9 $COUNT_1/$AND2_4/$ND2_1 00#Z 7 52 2 1 0
+$COUNT_1/$AND2_4/$ND2_1$Z 9 $COUNT_1/$AND2_4/$IV_1 00#A 3 26 2 -1 0
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl1 b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl1
new file mode 100644
index 000000000..bdc569e39
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl1
@@ -0,0 +1,11 @@
+$COUNT_1/$AND2_4/$IV_1 0 0 4 26 0 1
+$COUNT_1/$AND2_3/$IV_1 4 0 8 26 2 1
+$COUNT_1/$AND2_2/$ND2_1 8 0 14 26 0 1
+ACOUNT_1 14 0 18 26 2 1
+twfeed1 18 0 22 26 0 1
+$COUNT_1/$FJK3_1 22 0 86 26 0 1
+$COUNT_1/$AND2_3/$ND2_1 0 52 6 78 0 2
+$COUNT_1/$AND2_4/$ND2_1 6 52 12 78 2 2
+$COUNT_1/$AND2_2/$IV_1 12 52 16 78 2 2
+$COUNT_1/$AND2_1/$ND2_1 16 52 22 78 2 2
+$COUNT_1/$FJK3_2 22 52 86 78 0 2
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl2 b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl2
new file mode 100644
index 000000000..6e2601e82
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.pl2
@@ -0,0 +1,2 @@
+1 0 0 86 26 0 0
+2 0 52 86 78 0 0
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sav b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sav
new file mode 100644
index 000000000..04c8e9935
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sav
@@ -0,0 +1,18 @@
+0.009592
+121
+0
+1
+0.000000
+0.500000
+3.906156
+1
+1 1 2 37 13
+2 2 0 34 65
+3 2 2 63 65
+4 1 0 59 13
+5 1 2 32 13
+6 2 0 23 65
+7 1 2 12 13
+8 2 0 6 65
+9 1 0 70 13
+10 2 0 70 65
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sv2 b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sv2
new file mode 100644
index 000000000..9dd68ecdb
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.sv2
@@ -0,0 +1,19 @@
+0.001000
+123
+0
+2
+0.000000
+0.500000
+3.906156
+1
+1 1 2 16 13
+2 2 2 19 65
+3 2 2 14 65
+4 1 0 11 13
+5 1 2 6 13
+6 2 0 3 65
+7 1 0 2 13
+8 2 2 9 65
+9 1 0 50 13
+10 2 0 54 65
+11 1 0 84 13
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.twf b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.twf
new file mode 100644
index 000000000..a4c2eac35
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/smred.twf
@@ -0,0 +1,29 @@
+net 1
+segment channel 2
+ pin1 1 pin2 7 0 0
+net 2
+segment channel 3
+pin1 41 pin2 42 0 0
+segment channel 2
+pin1 12 pin2 3 0 0
+net 3
+segment channel 2
+pin1 35 pin2 36 0 0
+segment channel 2
+pin1 19 pin2 35 0 0
+net 4
+segment channel 2
+ pin1 5 pin2 38 0 0
+net 5
+net 7
+segment channel 2
+ pin1 14 pin2 43 0 0
+net 8
+segment channel 2
+ pin1 23 pin2 17 0 0
+net 9
+net 11
+segment channel 2
+ pin1 25 pin2 31 0 0
+net 14
+net 15
diff --git a/tests/long/70.twolf/ref/x86/linux/o3-timing/stats.txt b/tests/long/70.twolf/ref/x86/linux/o3-timing/stats.txt
new file mode 100644
index 000000000..daddf87eb
--- /dev/null
+++ b/tests/long/70.twolf/ref/x86/linux/o3-timing/stats.txt
@@ -0,0 +1,443 @@
+
+---------- Begin Simulation Statistics ----------
+host_inst_rate 118559 # Simulator instruction rate (inst/s)
+host_mem_usage 239716 # Number of bytes of host memory used
+host_seconds 1867.12 # Real time elapsed on the host
+host_tick_rate 68319429 # Simulator tick rate (ticks/s)
+sim_freq 1000000000000 # Frequency of simulated ticks
+sim_insts 221363017 # Number of instructions simulated
+sim_seconds 0.127561 # Number of seconds simulated
+sim_ticks 127560542500 # Number of ticks simulated
+system.cpu.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly.
+system.cpu.BPredUnit.BTBHits 16939138 # Number of BTB hits
+system.cpu.BPredUnit.BTBLookups 19067543 # Number of BTB lookups
+system.cpu.BPredUnit.RASInCorrect 0 # Number of incorrect RAS predictions.
+system.cpu.BPredUnit.condIncorrect 3582609 # Number of conditional branches incorrect
+system.cpu.BPredUnit.condPredicted 19223942 # Number of conditional branches predicted
+system.cpu.BPredUnit.lookups 19223942 # Number of BP lookups
+system.cpu.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
+system.cpu.commit.COM:branches 12326943 # Number of branches committed
+system.cpu.commit.COM:bw_lim_events 324452 # number cycles where commit BW limit reached
+system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits
+system.cpu.commit.COM:committed_per_cycle::samples 243992167 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::mean 0.907255 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::stdev 1.057266 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::underflows 0 0.00% 0.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::0 97637775 40.02% 40.02% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::1 102801930 42.13% 82.15% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::2 24473335 10.03% 92.18% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::3 10688182 4.38% 96.56% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::4 6438517 2.64% 99.20% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::5 836047 0.34% 99.54% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::6 523551 0.21% 99.76% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::7 268378 0.11% 99.87% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::8 324452 0.13% 100.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::overflows 0 0.00% 100.00% # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::min_value 0 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::max_value 8 # Number of insts commited each cycle
+system.cpu.commit.COM:committed_per_cycle::total 243992167 # Number of insts commited each cycle
+system.cpu.commit.COM:count 221363017 # Number of instructions committed
+system.cpu.commit.COM:loads 56649590 # Number of loads committed
+system.cpu.commit.COM:membars 0 # Number of memory barriers committed
+system.cpu.commit.COM:refs 77165306 # Number of memory references committed
+system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
+system.cpu.commit.branchMispredicts 3582617 # The number of times a branch was mispredicted
+system.cpu.commit.commitCommittedInsts 221363017 # The number of committed instructions
+system.cpu.commit.commitNonSpecStalls 1246 # The number of times commit has been forced to stall to communicate backwards
+system.cpu.commit.commitSquashedInsts 70151117 # The number of squashed insts skipped by commit
+system.cpu.committedInsts 221363017 # Number of Instructions Simulated
+system.cpu.committedInsts_total 221363017 # Number of Instructions Simulated
+system.cpu.cpi 1.152501 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 1.152501 # CPI: Total CPI of All Threads
+system.cpu.dcache.ReadReq_accesses 51727133 # number of ReadReq accesses(hits+misses)
+system.cpu.dcache.ReadReq_avg_miss_latency 34247.563353 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 34193.055556 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 51726620 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 17569000 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.000010 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 513 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_hits 153 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 12309500 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.000007 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_misses 360 # number of ReadReq MSHR misses
+system.cpu.dcache.WriteReq_accesses 20515730 # number of WriteReq accesses(hits+misses)
+system.cpu.dcache.WriteReq_avg_miss_latency 26394.870828 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency 35294.285714 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_hits 20510427 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 139972000 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.000258 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 5303 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_hits 3728 # number of WriteReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_miss_latency 55588500 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.000077 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 1575 # number of WriteReq MSHR misses
+system.cpu.dcache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_refs 37331.807235 # Average number of references to valid blocks.
+system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.cache_copies 0 # number of cache copies performed
+system.cpu.dcache.demand_accesses 72242863 # number of demand (read+write) accesses
+system.cpu.dcache.demand_avg_miss_latency 27087.517194 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 35089.405685 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 72237047 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 157541000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.000081 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 5816 # number of demand (read+write) misses
+system.cpu.dcache.demand_mshr_hits 3881 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_miss_latency 67898000 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.000027 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 1935 # number of demand (read+write) MSHR misses
+system.cpu.dcache.fast_writes 0 # number of fast writes performed
+system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.dcache.occ_%::0 0.336997 # Average percentage of cache occupancy
+system.cpu.dcache.occ_blocks::0 1380.340507 # Average occupied blocks per context
+system.cpu.dcache.overall_accesses 72242863 # number of overall (read+write) accesses
+system.cpu.dcache.overall_avg_miss_latency 27087.517194 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 35089.405685 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_hits 72237047 # number of overall hits
+system.cpu.dcache.overall_miss_latency 157541000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.000081 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 5816 # number of overall misses
+system.cpu.dcache.overall_mshr_hits 3881 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_miss_latency 67898000 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.000027 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 1935 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.dcache.replacements 46 # number of replacements
+system.cpu.dcache.sampled_refs 1935 # Sample count of references to valid blocks.
+system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.dcache.tagsinuse 1380.340507 # Cycle average of tags in use
+system.cpu.dcache.total_refs 72237047 # Total number of references to valid blocks.
+system.cpu.dcache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.dcache.writebacks 9 # number of writebacks
+system.cpu.decode.DECODE:BlockedCycles 5656231 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:DecodedInsts 309852988 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 53029625 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 184220573 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 11003980 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:UnblockCycles 1085738 # Number of cycles decode is unblocking
+system.cpu.fetch.Branches 19223942 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 20440935 # Number of cache lines fetched
+system.cpu.fetch.Cycles 196264127 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 182297 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 184675827 # Number of instructions fetch has processed
+system.cpu.fetch.MiscStallCycles 11 # Number of cycles fetch has spent waiting on interrupts, or bad addresses, or out of MSHRs
+system.cpu.fetch.SquashCycles 4455378 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.075352 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 20440935 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.fetch.predictedBranches 16939138 # Number of branches that fetch has predicted taken
+system.cpu.fetch.rate 0.723875 # Number of inst fetches per cycle
+system.cpu.fetch.rateDist::samples 254996147 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::mean 1.239017 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::stdev 1.348981 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::underflows 0 0.00% 0.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::0 66307953 26.00% 26.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::1 121646972 47.71% 73.71% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::2 37731127 14.80% 88.51% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::3 20479784 8.03% 96.54% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::4 1948325 0.76% 97.30% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::5 1108960 0.43% 97.74% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::6 1062530 0.42% 98.15% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::7 1340 0.00% 98.15% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::8 4709156 1.85% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::overflows 0 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::min_value 0 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::max_value 8 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::total 254996147 # Number of instructions fetched each cycle (Total)
+system.cpu.icache.ReadReq_accesses 20440935 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_avg_miss_latency 25661.556820 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 22374.875175 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 20435488 # number of ReadReq hits
+system.cpu.icache.ReadReq_miss_latency 139778500 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_rate 0.000266 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_misses 5447 # number of ReadReq misses
+system.cpu.icache.ReadReq_mshr_hits 440 # number of ReadReq MSHR hits
+system.cpu.icache.ReadReq_mshr_miss_latency 112031000 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.000245 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_misses 5007 # number of ReadReq MSHR misses
+system.cpu.icache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_refs 4082.198961 # Average number of references to valid blocks.
+system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.cache_copies 0 # number of cache copies performed
+system.cpu.icache.demand_accesses 20440935 # number of demand (read+write) accesses
+system.cpu.icache.demand_avg_miss_latency 25661.556820 # average overall miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency 22374.875175 # average overall mshr miss latency
+system.cpu.icache.demand_hits 20435488 # number of demand (read+write) hits
+system.cpu.icache.demand_miss_latency 139778500 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_rate 0.000266 # miss rate for demand accesses
+system.cpu.icache.demand_misses 5447 # number of demand (read+write) misses
+system.cpu.icache.demand_mshr_hits 440 # number of demand (read+write) MSHR hits
+system.cpu.icache.demand_mshr_miss_latency 112031000 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_rate 0.000245 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_misses 5007 # number of demand (read+write) MSHR misses
+system.cpu.icache.fast_writes 0 # number of fast writes performed
+system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.icache.occ_%::0 0.746987 # Average percentage of cache occupancy
+system.cpu.icache.occ_blocks::0 1529.828433 # Average occupied blocks per context
+system.cpu.icache.overall_accesses 20440935 # number of overall (read+write) accesses
+system.cpu.icache.overall_avg_miss_latency 25661.556820 # average overall miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 22374.875175 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.icache.overall_hits 20435488 # number of overall hits
+system.cpu.icache.overall_miss_latency 139778500 # number of overall miss cycles
+system.cpu.icache.overall_miss_rate 0.000266 # miss rate for overall accesses
+system.cpu.icache.overall_misses 5447 # number of overall misses
+system.cpu.icache.overall_mshr_hits 440 # number of overall MSHR hits
+system.cpu.icache.overall_mshr_miss_latency 112031000 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_rate 0.000245 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_misses 5007 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.icache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.icache.replacements 3101 # number of replacements
+system.cpu.icache.sampled_refs 5006 # Sample count of references to valid blocks.
+system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.icache.tagsinuse 1529.828433 # Cycle average of tags in use
+system.cpu.icache.total_refs 20435488 # Total number of references to valid blocks.
+system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.icache.writebacks 0 # number of writebacks
+system.cpu.idleCycles 124939 # Total number of cycles that the CPU has spent unscheduled due to idling
+system.cpu.iew.EXEC:branches 13366188 # Number of branches executed
+system.cpu.iew.EXEC:nop 0 # number of nop insts executed
+system.cpu.iew.EXEC:rate 0.954963 # Inst execution rate
+system.cpu.iew.EXEC:refs 84717237 # number of memory reference insts executed
+system.cpu.iew.EXEC:stores 21535662 # Number of stores executed
+system.cpu.iew.EXEC:swp 0 # number of swp insts executed
+system.cpu.iew.WB:consumers 389337537 # num instructions consuming a value
+system.cpu.iew.WB:count 241459353 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.499412 # average fanout of values written-back
+system.cpu.iew.WB:penalized 0 # number of instrctions required to write to 'other' IQ
+system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
+system.cpu.iew.WB:producers 194439848 # num instructions producing a value
+system.cpu.iew.WB:rate 0.946450 # insts written-back per cycle
+system.cpu.iew.WB:sent 242120517 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 3656523 # Number of branch mispredicts detected at execute
+system.cpu.iew.iewBlockCycles 214895 # Number of cycles IEW is blocking
+system.cpu.iew.iewDispLoadInsts 75869162 # Number of dispatched load instructions
+system.cpu.iew.iewDispNonSpecInsts 1275 # Number of dispatched non-speculative instructions
+system.cpu.iew.iewDispSquashedInsts 2489008 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispStoreInsts 25600521 # Number of dispatched store instructions
+system.cpu.iew.iewDispatchedInsts 291514094 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 63181575 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 4005104 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 243631219 # Number of executed instructions
+system.cpu.iew.iewIQFullEvents 25200 # Number of times the IQ has become full, causing a stall
+system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
+system.cpu.iew.iewLSQFullEvents 0 # Number of times the LSQ has become full, causing a stall
+system.cpu.iew.iewSquashCycles 11003980 # Number of cycles IEW is squashing
+system.cpu.iew.iewUnblockCycles 40028 # Number of cycles IEW is unblocking
+system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding
+system.cpu.iew.lsq.thread.0.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked
+system.cpu.iew.lsq.thread.0.forwLoads 11103688 # Number of loads that had data forwarded from stores
+system.cpu.iew.lsq.thread.0.ignoredResponses 71380 # Number of memory responses ignored because the instruction is squashed
+system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.memOrderViolation 879354 # Number of memory ordering violations
+system.cpu.iew.lsq.thread.0.rescheduledLoads 44904 # Number of loads that were rescheduled
+system.cpu.iew.lsq.thread.0.squashedLoads 19219572 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedStores 5084805 # Number of stores squashed
+system.cpu.iew.memOrderViolationEvents 879354 # Number of memory order violations
+system.cpu.iew.predictedNotTakenIncorrect 151398 # Number of branches that were predicted not taken incorrectly
+system.cpu.iew.predictedTakenIncorrect 3505125 # Number of branches that were predicted taken incorrectly
+system.cpu.ipc 0.867678 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 0.867678 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0::No_OpClass 1180294 0.48% 0.48% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntAlu 158353329 63.95% 64.42% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntMult 0 0.00% 64.42% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntDiv 0 0.00% 64.42% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatAdd 1520272 0.61% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCmp 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCvt 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatMult 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatDiv 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatSqrt 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAdd 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAddAcc 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAlu 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCmp 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCvt 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMisc 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMult 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMultAcc 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShift 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShiftAcc 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdSqrt 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAdd 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAlu 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCmp 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCvt 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatDiv 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMisc 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMult 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMultAcc 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatSqrt 0 0.00% 65.04% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemRead 64587764 26.08% 91.12% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemWrite 21994664 8.88% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IprAccess 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::InstPrefetch 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::total 247636323 # Type of FU issued
+system.cpu.iq.ISSUE:fu_busy_cnt 40899 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_rate 0.000165 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_full::No_OpClass 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntAlu 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntDiv 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatAdd 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCmp 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCvt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatDiv 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatSqrt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAdd 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAddAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAlu 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCmp 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCvt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMisc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMultAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShift 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShiftAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdSqrt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAdd 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAlu 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCmp 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCvt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatDiv 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMisc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMultAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatSqrt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemRead 37912 92.70% 92.70% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemWrite 2987 7.30% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IprAccess 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::InstPrefetch 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:issued_per_cycle::samples 254996147 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::mean 0.971138 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::stdev 0.960460 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::underflows 0 0.00% 0.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::0 97493255 38.23% 38.23% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::1 86911390 34.08% 72.32% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::2 54912481 21.53% 93.85% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::3 12234045 4.80% 98.65% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::4 3109625 1.22% 99.87% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::5 255105 0.10% 99.97% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::6 77911 0.03% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::7 2335 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::8 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::overflows 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::min_value 0 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::max_value 7 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::total 254996147 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:rate 0.970662 # Inst issue rate
+system.cpu.iq.iqInstsAdded 291512819 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 247636323 # Number of instructions issued
+system.cpu.iq.iqNonSpecInstsAdded 1275 # Number of non-speculative instructions added to the IQ
+system.cpu.iq.iqSquashedInstsExamined 69673728 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedInstsIssued 1298 # Number of squashed instructions issued
+system.cpu.iq.iqSquashedNonSpecRemoved 29 # Number of squashed non-spec instructions that were removed
+system.cpu.iq.iqSquashedOperandsExamined 182988092 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadExReq_accesses 1575 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_avg_miss_latency 34364.012739 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 31058.917197 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_hits 5 # number of ReadExReq hits
+system.cpu.l2cache.ReadExReq_miss_latency 53951500 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_rate 0.996825 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_misses 1570 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 48762500 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_miss_rate 0.996825 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_misses 1570 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadReq_accesses 5367 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadReq_avg_miss_latency 34265.528407 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 31035.178098 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_hits 1970 # number of ReadReq hits
+system.cpu.l2cache.ReadReq_miss_latency 116400000 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.632942 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 3397 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 105426500 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.632942 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 3397 # number of ReadReq MSHR misses
+system.cpu.l2cache.Writeback_accesses 9 # number of Writeback accesses(hits+misses)
+system.cpu.l2cache.Writeback_hits 9 # number of Writeback hits
+system.cpu.l2cache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_refs 0.579412 # Average number of references to valid blocks.
+system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.cache_copies 0 # number of cache copies performed
+system.cpu.l2cache.demand_accesses 6942 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_avg_miss_latency 34296.657942 # average overall miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 31042.681699 # average overall mshr miss latency
+system.cpu.l2cache.demand_hits 1975 # number of demand (read+write) hits
+system.cpu.l2cache.demand_miss_latency 170351500 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.715500 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 4967 # number of demand (read+write) misses
+system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
+system.cpu.l2cache.demand_mshr_miss_latency 154189000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.715500 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 4967 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.fast_writes 0 # number of fast writes performed
+system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.l2cache.occ_%::0 0.068086 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_%::1 0.000031 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_blocks::0 2231.049035 # Average occupied blocks per context
+system.cpu.l2cache.occ_blocks::1 1.015700 # Average occupied blocks per context
+system.cpu.l2cache.overall_accesses 6942 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_avg_miss_latency 34296.657942 # average overall miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 31042.681699 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_hits 1975 # number of overall hits
+system.cpu.l2cache.overall_miss_latency 170351500 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.715500 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 4967 # number of overall misses
+system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
+system.cpu.l2cache.overall_mshr_miss_latency 154189000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.715500 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 4967 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.l2cache.replacements 0 # number of replacements
+system.cpu.l2cache.sampled_refs 3400 # Sample count of references to valid blocks.
+system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
+system.cpu.l2cache.tagsinuse 2232.064735 # Cycle average of tags in use
+system.cpu.l2cache.total_refs 1970 # Total number of references to valid blocks.
+system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.l2cache.writebacks 0 # number of writebacks
+system.cpu.memDep0.conflictingLoads 21807942 # Number of conflicting loads.
+system.cpu.memDep0.conflictingStores 4495847 # Number of conflicting stores.
+system.cpu.memDep0.insertedLoads 75869162 # Number of loads inserted to the mem dependence unit.
+system.cpu.memDep0.insertedStores 25600521 # Number of stores inserted to the mem dependence unit.
+system.cpu.numCycles 255121086 # number of cpu cycles simulated
+system.cpu.rename.RENAME:BlockCycles 1303093 # Number of cycles rename is blocking
+system.cpu.rename.RENAME:CommittedMaps 234363409 # Number of HB maps that are committed
+system.cpu.rename.RENAME:IQFullEvents 2662460 # Number of times rename has blocked due to IQ full
+system.cpu.rename.RENAME:IdleCycles 57579297 # Number of cycles rename is idle
+system.cpu.rename.RENAME:LSQFullEvents 975892 # Number of times rename has blocked due to LSQ full
+system.cpu.rename.RENAME:RenameLookups 963293874 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 304077108 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 331962025 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 180705413 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 11003980 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:UnblockCycles 4387817 # Number of cycles rename is unblocking
+system.cpu.rename.RENAME:UndoneMaps 97598616 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 16547 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:serializingInsts 1274 # count of serializing insts renamed
+system.cpu.rename.RENAME:skidInsts 8156807 # count of insts added to the skid buffer
+system.cpu.rename.RENAME:tempSerializingInsts 1279 # count of temporary serializing insts renamed
+system.cpu.timesIdled 2349 # Number of times that the entire CPU went into an idle state and unscheduled itself
+system.cpu.workload.PROG:num_syscalls 400 # Number of system calls
+
+---------- End Simulation Statistics ----------
diff --git a/tests/quick/00.hello/ref/x86/linux/o3-timing/config.ini b/tests/quick/00.hello/ref/x86/linux/o3-timing/config.ini
new file mode 100644
index 000000000..202d0b795
--- /dev/null
+++ b/tests/quick/00.hello/ref/x86/linux/o3-timing/config.ini
@@ -0,0 +1,519 @@
+[root]
+type=Root
+children=system
+time_sync_enable=false
+time_sync_period=100000000000
+time_sync_spin_threshold=100000000
+
+[system]
+type=System
+children=cpu membus physmem
+mem_mode=atomic
+physmem=system.physmem
+
+[system.cpu]
+type=DerivO3CPU
+children=dcache dtb fuPool icache itb l2cache toL2Bus tracer workload
+BTBEntries=4096
+BTBTagSize=16
+LFSTSize=1024
+LQEntries=32
+RASSize=16
+SQEntries=32
+SSITSize=1024
+activity=0
+backComSize=5
+cachePorts=200
+checker=Null
+choiceCtrBits=2
+choicePredictorSize=8192
+clock=500
+commitToDecodeDelay=1
+commitToFetchDelay=1
+commitToIEWDelay=1
+commitToRenameDelay=1
+commitWidth=8
+cpu_id=0
+decodeToFetchDelay=1
+decodeToRenameDelay=1
+decodeWidth=8
+defer_registration=false
+dispatchWidth=8
+do_checkpoint_insts=true
+do_statistics_insts=true
+dtb=system.cpu.dtb
+fetchToDecodeDelay=1
+fetchTrapLatency=1
+fetchWidth=8
+forwardComSize=5
+fuPool=system.cpu.fuPool
+function_trace=false
+function_trace_start=0
+globalCtrBits=2
+globalHistoryBits=13
+globalPredictorSize=8192
+iewToCommitDelay=1
+iewToDecodeDelay=1
+iewToFetchDelay=1
+iewToRenameDelay=1
+instShiftAmt=2
+issueToExecuteDelay=1
+issueWidth=8
+itb=system.cpu.itb
+localCtrBits=2
+localHistoryBits=11
+localHistoryTableSize=2048
+localPredictorSize=2048
+max_insts_all_threads=0
+max_insts_any_thread=0
+max_loads_all_threads=0
+max_loads_any_thread=0
+numIQEntries=64
+numPhysFloatRegs=256
+numPhysIntRegs=256
+numROBEntries=192
+numRobs=1
+numThreads=1
+phase=0
+predType=tournament
+progress_interval=0
+renameToDecodeDelay=1
+renameToFetchDelay=1
+renameToIEWDelay=2
+renameToROBDelay=1
+renameWidth=8
+smtCommitPolicy=RoundRobin
+smtFetchPolicy=SingleThread
+smtIQPolicy=Partitioned
+smtIQThreshold=100
+smtLSQPolicy=Partitioned
+smtLSQThreshold=100
+smtNumFetchingThreads=1
+smtROBPolicy=Partitioned
+smtROBThreshold=100
+squashWidth=8
+system=system
+tracer=system.cpu.tracer
+trapLatency=13
+wbDepth=1
+wbWidth=8
+workload=system.cpu.workload
+dcache_port=system.cpu.dcache.cpu_side
+icache_port=system.cpu.icache.cpu_side
+
+[system.cpu.dcache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=262144
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.dcache_port
+mem_side=system.cpu.toL2Bus.port[1]
+
+[system.cpu.dtb]
+type=X86TLB
+size=64
+
+[system.cpu.fuPool]
+type=FUPool
+children=FUList0 FUList1 FUList2 FUList3 FUList4 FUList5 FUList6 FUList7 FUList8
+FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 system.cpu.fuPool.FUList8
+
+[system.cpu.fuPool.FUList0]
+type=FUDesc
+children=opList
+count=6
+opList=system.cpu.fuPool.FUList0.opList
+
+[system.cpu.fuPool.FUList0.opList]
+type=OpDesc
+issueLat=1
+opClass=IntAlu
+opLat=1
+
+[system.cpu.fuPool.FUList1]
+type=FUDesc
+children=opList0 opList1
+count=2
+opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1
+
+[system.cpu.fuPool.FUList1.opList0]
+type=OpDesc
+issueLat=1
+opClass=IntMult
+opLat=3
+
+[system.cpu.fuPool.FUList1.opList1]
+type=OpDesc
+issueLat=19
+opClass=IntDiv
+opLat=20
+
+[system.cpu.fuPool.FUList2]
+type=FUDesc
+children=opList0 opList1 opList2
+count=4
+opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2
+
+[system.cpu.fuPool.FUList2.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatAdd
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList1]
+type=OpDesc
+issueLat=1
+opClass=FloatCmp
+opLat=2
+
+[system.cpu.fuPool.FUList2.opList2]
+type=OpDesc
+issueLat=1
+opClass=FloatCvt
+opLat=2
+
+[system.cpu.fuPool.FUList3]
+type=FUDesc
+children=opList0 opList1 opList2
+count=2
+opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2
+
+[system.cpu.fuPool.FUList3.opList0]
+type=OpDesc
+issueLat=1
+opClass=FloatMult
+opLat=4
+
+[system.cpu.fuPool.FUList3.opList1]
+type=OpDesc
+issueLat=12
+opClass=FloatDiv
+opLat=12
+
+[system.cpu.fuPool.FUList3.opList2]
+type=OpDesc
+issueLat=24
+opClass=FloatSqrt
+opLat=24
+
+[system.cpu.fuPool.FUList4]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList4.opList
+
+[system.cpu.fuPool.FUList4.opList]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList5]
+type=FUDesc
+children=opList00 opList01 opList02 opList03 opList04 opList05 opList06 opList07 opList08 opList09 opList10 opList11 opList12 opList13 opList14 opList15 opList16 opList17 opList18 opList19
+count=4
+opList=system.cpu.fuPool.FUList5.opList00 system.cpu.fuPool.FUList5.opList01 system.cpu.fuPool.FUList5.opList02 system.cpu.fuPool.FUList5.opList03 system.cpu.fuPool.FUList5.opList04 system.cpu.fuPool.FUList5.opList05 system.cpu.fuPool.FUList5.opList06 system.cpu.fuPool.FUList5.opList07 system.cpu.fuPool.FUList5.opList08 system.cpu.fuPool.FUList5.opList09 system.cpu.fuPool.FUList5.opList10 system.cpu.fuPool.FUList5.opList11 system.cpu.fuPool.FUList5.opList12 system.cpu.fuPool.FUList5.opList13 system.cpu.fuPool.FUList5.opList14 system.cpu.fuPool.FUList5.opList15 system.cpu.fuPool.FUList5.opList16 system.cpu.fuPool.FUList5.opList17 system.cpu.fuPool.FUList5.opList18 system.cpu.fuPool.FUList5.opList19
+
+[system.cpu.fuPool.FUList5.opList00]
+type=OpDesc
+issueLat=1
+opClass=SimdAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList01]
+type=OpDesc
+issueLat=1
+opClass=SimdAddAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList02]
+type=OpDesc
+issueLat=1
+opClass=SimdAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList03]
+type=OpDesc
+issueLat=1
+opClass=SimdCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList04]
+type=OpDesc
+issueLat=1
+opClass=SimdCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList05]
+type=OpDesc
+issueLat=1
+opClass=SimdMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList06]
+type=OpDesc
+issueLat=1
+opClass=SimdMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList07]
+type=OpDesc
+issueLat=1
+opClass=SimdMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList08]
+type=OpDesc
+issueLat=1
+opClass=SimdShift
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList09]
+type=OpDesc
+issueLat=1
+opClass=SimdShiftAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList10]
+type=OpDesc
+issueLat=1
+opClass=SimdSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList11]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAdd
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList12]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatAlu
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList13]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCmp
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList14]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatCvt
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList15]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatDiv
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList16]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMisc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList17]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMult
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList18]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatMultAcc
+opLat=1
+
+[system.cpu.fuPool.FUList5.opList19]
+type=OpDesc
+issueLat=1
+opClass=SimdFloatSqrt
+opLat=1
+
+[system.cpu.fuPool.FUList6]
+type=FUDesc
+children=opList
+count=0
+opList=system.cpu.fuPool.FUList6.opList
+
+[system.cpu.fuPool.FUList6.opList]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList7]
+type=FUDesc
+children=opList0 opList1
+count=4
+opList=system.cpu.fuPool.FUList7.opList0 system.cpu.fuPool.FUList7.opList1
+
+[system.cpu.fuPool.FUList7.opList0]
+type=OpDesc
+issueLat=1
+opClass=MemRead
+opLat=1
+
+[system.cpu.fuPool.FUList7.opList1]
+type=OpDesc
+issueLat=1
+opClass=MemWrite
+opLat=1
+
+[system.cpu.fuPool.FUList8]
+type=FUDesc
+children=opList
+count=1
+opList=system.cpu.fuPool.FUList8.opList
+
+[system.cpu.fuPool.FUList8.opList]
+type=OpDesc
+issueLat=3
+opClass=IprAccess
+opLat=3
+
+[system.cpu.icache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=131072
+subblock_size=0
+tgts_per_mshr=20
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.icache_port
+mem_side=system.cpu.toL2Bus.port[0]
+
+[system.cpu.itb]
+type=X86TLB
+size=64
+
+[system.cpu.l2cache]
+type=BaseCache
+addr_range=0:18446744073709551615
+assoc=2
+block_size=64
+forward_snoops=true
+hash_delay=1
+latency=1000
+max_miss_count=0
+mshrs=10
+num_cpus=1
+prefetch_data_accesses_only=false
+prefetch_degree=1
+prefetch_latency=10000
+prefetch_on_access=false
+prefetch_past_page=false
+prefetch_policy=none
+prefetch_serial_squash=false
+prefetch_use_cpu_id=true
+prefetcher_size=100
+prioritizeRequests=false
+repl=Null
+size=2097152
+subblock_size=0
+tgts_per_mshr=5
+trace_addr=0
+two_queue=false
+write_buffers=8
+cpu_side=system.cpu.toL2Bus.port[2]
+mem_side=system.membus.port[1]
+
+[system.cpu.toL2Bus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
+
+[system.cpu.tracer]
+type=ExeTracer
+
+[system.cpu.workload]
+type=LiveProcess
+cmd=hello
+cwd=
+egid=100
+env=
+errout=cerr
+euid=100
+executable=/dist/m5/regression/test-progs/hello/bin/x86/linux/hello
+gid=100
+input=cin
+max_stack_size=67108864
+output=cout
+pid=100
+ppid=99
+simpoint=0
+system=system
+uid=100
+
+[system.membus]
+type=Bus
+block_size=64
+bus_id=0
+clock=1000
+header_cycles=1
+use_default_range=false
+width=64
+port=system.physmem.port[0] system.cpu.l2cache.mem_side
+
+[system.physmem]
+type=PhysicalMemory
+file=
+latency=30000
+latency_var=0
+null=false
+range=0:134217727
+zero=false
+port=system.membus.port[0]
+
diff --git a/tests/quick/00.hello/ref/x86/linux/o3-timing/simerr b/tests/quick/00.hello/ref/x86/linux/o3-timing/simerr
new file mode 100755
index 000000000..94d399eab
--- /dev/null
+++ b/tests/quick/00.hello/ref/x86/linux/o3-timing/simerr
@@ -0,0 +1,7 @@
+warn: Sockets disabled, not accepting gdb connections
+For more information see: http://www.m5sim.org/warn/d946bea6
+warn: instruction 'fnstcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+warn: instruction 'fldcw_Mw' unimplemented
+For more information see: http://www.m5sim.org/warn/437d5238
+hack: be nice to actually delete the event here
diff --git a/tests/quick/00.hello/ref/x86/linux/o3-timing/simout b/tests/quick/00.hello/ref/x86/linux/o3-timing/simout
new file mode 100755
index 000000000..12f04436f
--- /dev/null
+++ b/tests/quick/00.hello/ref/x86/linux/o3-timing/simout
@@ -0,0 +1,16 @@
+M5 Simulator System
+
+Copyright (c) 2001-2008
+The Regents of The University of Michigan
+All Rights Reserved
+
+
+M5 compiled Jan 31 2011 16:34:44
+M5 revision 1b98eea40540 7883 default qtip tip x86o3regressions.patch
+M5 started Jan 31 2011 16:34:46
+M5 executing on burrito
+command line: build/X86_SE/m5.opt -d build/X86_SE/tests/opt/quick/00.hello/x86/linux/o3-timing -re tests/run.py build/X86_SE/tests/opt/quick/00.hello/x86/linux/o3-timing
+Global frequency set at 1000000000000 ticks per second
+info: Entering event queue @ 0. Starting simulation...
+Hello world!
+Exiting @ tick 13766000 because target called exit()
diff --git a/tests/quick/00.hello/ref/x86/linux/o3-timing/stats.txt b/tests/quick/00.hello/ref/x86/linux/o3-timing/stats.txt
new file mode 100644
index 000000000..0a112c922
--- /dev/null
+++ b/tests/quick/00.hello/ref/x86/linux/o3-timing/stats.txt
@@ -0,0 +1,437 @@
+
+---------- Begin Simulation Statistics ----------
+host_inst_rate 48300 # Simulator instruction rate (inst/s)
+host_mem_usage 226820 # Number of bytes of host memory used
+host_seconds 0.20 # Real time elapsed on the host
+host_tick_rate 67673766 # Simulator tick rate (ticks/s)
+sim_freq 1000000000000 # Frequency of simulated ticks
+sim_insts 9809 # Number of instructions simulated
+sim_seconds 0.000014 # Number of seconds simulated
+sim_ticks 13766000 # Number of ticks simulated
+system.cpu.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly).
+system.cpu.BPredUnit.BTBHits 772 # Number of BTB hits
+system.cpu.BPredUnit.BTBLookups 1892 # Number of BTB lookups
+system.cpu.BPredUnit.RASInCorrect 0 # Number of incorrect RAS predictions.
+system.cpu.BPredUnit.condIncorrect 458 # Number of conditional branches incorrect
+system.cpu.BPredUnit.condPredicted 1920 # Number of conditional branches predicted
+system.cpu.BPredUnit.lookups 1920 # Number of BP lookups
+system.cpu.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
+system.cpu.commit.COM:branches 1214 # Number of branches committed
+system.cpu.commit.COM:bw_lim_events 37 # number cycles where commit BW limit reached
+system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits
+system.cpu.commit.COM:committed_per_cycle::samples 15124 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::mean 0.648572 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::stdev 1.100130 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::underflows 0 0.00% 0.00% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::0 9612 63.55% 63.55% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::1 3088 20.42% 83.97% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::2 1220 8.07% 92.04% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::3 836 5.53% 97.57% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::4 232 1.53% 99.10% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::5 57 0.38% 99.48% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::6 30 0.20% 99.68% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::7 12 0.08% 99.76% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::8 37 0.24% 100.00% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::overflows 0 0.00% 100.00% # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::min_value 0 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::max_value 8 # Number of insts committed each cycle
+system.cpu.commit.COM:committed_per_cycle::total 15124 # Number of insts committed each cycle
+system.cpu.commit.COM:count 9809 # Number of instructions committed
+system.cpu.commit.COM:loads 1056 # Number of loads committed
+system.cpu.commit.COM:membars 0 # Number of memory barriers committed
+system.cpu.commit.COM:refs 1990 # Number of memory references committed
+system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
+system.cpu.commit.branchMispredicts 462 # The number of times a branch was mispredicted
+system.cpu.commit.commitCommittedInsts 9809 # The number of committed instructions
+system.cpu.commit.commitNonSpecStalls 13 # The number of times commit has been forced to stall to communicate backwards
+system.cpu.commit.commitSquashedInsts 3832 # The number of squashed insts skipped by commit
+system.cpu.committedInsts 9809 # Number of Instructions Simulated
+system.cpu.committedInsts_total 9809 # Number of Instructions Simulated
+system.cpu.cpi 2.806912 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 2.806912 # CPI: Total CPI of All Threads
+system.cpu.dcache.ReadReq_accesses 1244 # number of ReadReq accesses (hits+misses)
+system.cpu.dcache.ReadReq_avg_miss_latency 37105.263158 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 35048.387097 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 1168 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 2820000 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.061093 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 76 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_hits 14 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 2173000 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.049839 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_misses 62 # number of ReadReq MSHR misses
+system.cpu.dcache.WriteReq_accesses 934 # number of WriteReq accesses (hits+misses)
+system.cpu.dcache.WriteReq_avg_miss_latency 33138.977636 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency 35775.641026 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_hits 621 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 10372500 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.335118 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 313 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_hits 235 # number of WriteReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_miss_latency 2790500 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.083512 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 78 # number of WriteReq MSHR misses
+system.cpu.dcache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.dcache.avg_refs 12.870504 # Average number of references to valid blocks.
+system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.dcache.cache_copies 0 # number of cache copies performed
+system.cpu.dcache.demand_accesses 2178 # number of demand (read+write) accesses
+system.cpu.dcache.demand_avg_miss_latency 33913.881748 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 35453.571429 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 1789 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 13192500 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.178604 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 389 # number of demand (read+write) misses
+system.cpu.dcache.demand_mshr_hits 249 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_miss_latency 4963500 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.064279 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 140 # number of demand (read+write) MSHR misses
+system.cpu.dcache.fast_writes 0 # number of fast writes performed
+system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.dcache.occ_%::0 0.020744 # Average percentage of cache occupancy
+system.cpu.dcache.occ_blocks::0 84.965644 # Average occupied blocks per context
+system.cpu.dcache.overall_accesses 2178 # number of overall (read+write) accesses
+system.cpu.dcache.overall_avg_miss_latency 33913.881748 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 35453.571429 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_hits 1789 # number of overall hits
+system.cpu.dcache.overall_miss_latency 13192500 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.178604 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 389 # number of overall misses
+system.cpu.dcache.overall_mshr_hits 249 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_miss_latency 4963500 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.064279 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 140 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.dcache.replacements 0 # number of replacements
+system.cpu.dcache.sampled_refs 139 # Sample count of references to valid blocks.
+system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
+system.cpu.dcache.tagsinuse 84.965644 # Cycle average of tags in use
+system.cpu.dcache.total_refs 1789 # Total number of references to valid blocks.
+system.cpu.dcache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.dcache.writebacks 0 # number of writebacks
+system.cpu.decode.DECODE:BlockedCycles 464 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:DecodedInsts 15304 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 6233 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 8371 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 721 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:UnblockCycles 56 # Number of cycles decode is unblocking
+system.cpu.fetch.Branches 1920 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 1255 # Number of cache lines fetched
+system.cpu.fetch.Cycles 9031 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 117 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 8830 # Number of instructions fetch has processed
+system.cpu.fetch.MiscStallCycles 8 # Number of cycles fetch has spent waiting on interrupts, or bad addresses, or out of MSHRs
+system.cpu.fetch.SquashCycles 469 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.069735 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 1255 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.fetch.predictedBranches 772 # Number of branches that fetch has predicted taken
+system.cpu.fetch.rate 0.320706 # Number of inst fetches per cycle
+system.cpu.fetch.rateDist::samples 15845 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::mean 1.002083 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::stdev 1.178869 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::underflows 0 0.00% 0.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::0 7129 44.99% 44.99% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::1 4489 28.33% 73.32% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::2 1878 11.85% 85.18% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::3 2046 12.91% 98.09% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::4 57 0.36% 98.45% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::5 227 1.43% 99.88% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::6 6 0.04% 99.92% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::7 8 0.05% 99.97% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::8 5 0.03% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::overflows 0 0.00% 100.00% # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::min_value 0 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::max_value 8 # Number of instructions fetched each cycle (Total)
+system.cpu.fetch.rateDist::total 15845 # Number of instructions fetched each cycle (Total)
+system.cpu.icache.ReadReq_accesses 1255 # number of ReadReq accesses (hits+misses)
+system.cpu.icache.ReadReq_avg_miss_latency 37417.543860 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 35040.697674 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 970 # number of ReadReq hits
+system.cpu.icache.ReadReq_miss_latency 10664000 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_rate 0.227092 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_misses 285 # number of ReadReq misses
+system.cpu.icache.ReadReq_mshr_hits 27 # number of ReadReq MSHR hits
+system.cpu.icache.ReadReq_mshr_miss_latency 9040500 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.205578 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_misses 258 # number of ReadReq MSHR misses
+system.cpu.icache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.icache.avg_refs 3.759690 # Average number of references to valid blocks.
+system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.icache.cache_copies 0 # number of cache copies performed
+system.cpu.icache.demand_accesses 1255 # number of demand (read+write) accesses
+system.cpu.icache.demand_avg_miss_latency 37417.543860 # average overall miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency 35040.697674 # average overall mshr miss latency
+system.cpu.icache.demand_hits 970 # number of demand (read+write) hits
+system.cpu.icache.demand_miss_latency 10664000 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_rate 0.227092 # miss rate for demand accesses
+system.cpu.icache.demand_misses 285 # number of demand (read+write) misses
+system.cpu.icache.demand_mshr_hits 27 # number of demand (read+write) MSHR hits
+system.cpu.icache.demand_mshr_miss_latency 9040500 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_rate 0.205578 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_misses 258 # number of demand (read+write) MSHR misses
+system.cpu.icache.fast_writes 0 # number of fast writes performed
+system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.icache.occ_%::0 0.061525 # Average percentage of cache occupancy
+system.cpu.icache.occ_blocks::0 126.002915 # Average occupied blocks per context
+system.cpu.icache.overall_accesses 1255 # number of overall (read+write) accesses
+system.cpu.icache.overall_avg_miss_latency 37417.543860 # average overall miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 35040.697674 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.icache.overall_hits 970 # number of overall hits
+system.cpu.icache.overall_miss_latency 10664000 # number of overall miss cycles
+system.cpu.icache.overall_miss_rate 0.227092 # miss rate for overall accesses
+system.cpu.icache.overall_misses 285 # number of overall misses
+system.cpu.icache.overall_mshr_hits 27 # number of overall MSHR hits
+system.cpu.icache.overall_mshr_miss_latency 9040500 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_rate 0.205578 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_misses 258 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.icache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.icache.replacements 0 # number of replacements
+system.cpu.icache.sampled_refs 258 # Sample count of references to valid blocks.
+system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
+system.cpu.icache.tagsinuse 126.002915 # Cycle average of tags in use
+system.cpu.icache.total_refs 970 # Total number of references to valid blocks.
+system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.icache.writebacks 0 # number of writebacks
+system.cpu.idleCycles 11688 # Total number of cycles that the CPU has spent unscheduled due to idling
+system.cpu.iew.EXEC:branches 1318 # Number of branches executed
+system.cpu.iew.EXEC:nop 0 # number of nop insts executed
+system.cpu.iew.EXEC:rate 0.434678 # Inst execution rate
+system.cpu.iew.EXEC:refs 2353 # number of memory reference insts executed
+system.cpu.iew.EXEC:stores 1060 # Number of stores executed
+system.cpu.iew.EXEC:swp 0 # number of swp insts executed
+system.cpu.iew.WB:consumers 10358 # num instructions consuming a value
+system.cpu.iew.WB:count 11818 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.702935 # average fanout of values written-back
+system.cpu.iew.WB:penalized 0 # number of instructions required to write to 'other' IQ
+system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
+system.cpu.iew.WB:producers 7281 # num instructions producing a value
+system.cpu.iew.WB:rate 0.429230 # insts written-back per cycle
+system.cpu.iew.WB:sent 11866 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 487 # Number of branch mispredicts detected at execute
+system.cpu.iew.iewBlockCycles 58 # Number of cycles IEW is blocking
+system.cpu.iew.iewDispLoadInsts 1535 # Number of dispatched load instructions
+system.cpu.iew.iewDispNonSpecInsts 17 # Number of dispatched non-speculative instructions
+system.cpu.iew.iewDispSquashedInsts 418 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispStoreInsts 1238 # Number of dispatched store instructions
+system.cpu.iew.iewDispatchedInsts 13635 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 1293 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 536 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 11968 # Number of executed instructions
+system.cpu.iew.iewIQFullEvents 3 # Number of times the IQ has become full, causing a stall
+system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
+system.cpu.iew.iewLSQFullEvents 1 # Number of times the LSQ has become full, causing a stall
+system.cpu.iew.iewSquashCycles 721 # Number of cycles IEW is squashing
+system.cpu.iew.iewUnblockCycles 6 # Number of cycles IEW is unblocking
+system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding
+system.cpu.iew.lsq.thread.0.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked
+system.cpu.iew.lsq.thread.0.forwLoads 21 # Number of loads that had data forwarded from stores
+system.cpu.iew.lsq.thread.0.ignoredResponses 1 # Number of memory responses ignored because the instruction is squashed
+system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
+system.cpu.iew.lsq.thread.0.memOrderViolation 7 # Number of memory ordering violations
+system.cpu.iew.lsq.thread.0.rescheduledLoads 0 # Number of loads that were rescheduled
+system.cpu.iew.lsq.thread.0.squashedLoads 479 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedStores 304 # Number of stores squashed
+system.cpu.iew.memOrderViolationEvents 7 # Number of memory order violations
+system.cpu.iew.predictedNotTakenIncorrect 390 # Number of branches that were predicted not taken incorrectly
+system.cpu.iew.predictedTakenIncorrect 97 # Number of branches that were predicted taken incorrectly
+system.cpu.ipc 0.356263 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 0.356263 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0::No_OpClass 3 0.02% 0.02% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntAlu 10018 80.12% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntMult 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IntDiv 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatAdd 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCmp 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatCvt 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatMult 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatDiv 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::FloatSqrt 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAdd 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAddAcc 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdAlu 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCmp 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdCvt 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMisc 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMult 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdMultAcc 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShift 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdShiftAcc 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdSqrt 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAdd 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatAlu 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCmp 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatCvt 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatDiv 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMisc 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMult 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatMultAcc 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::SimdFloatSqrt 0 0.00% 80.14% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemRead 1360 10.88% 91.02% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::MemWrite 1123 8.98% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::IprAccess 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::InstPrefetch 0 0.00% 100.00% # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_0::total 12504 # Type of FU issued
+system.cpu.iq.ISSUE:fu_busy_cnt 4 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_rate 0.000320 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_full::No_OpClass 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntAlu 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IntDiv 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatAdd 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCmp 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatCvt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatDiv 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::FloatSqrt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAdd 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAddAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdAlu 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCmp 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdCvt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMisc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdMultAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShift 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdShiftAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdSqrt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAdd 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatAlu 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCmp 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatCvt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatDiv 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMisc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMult 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatMultAcc 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::SimdFloatSqrt 0 0.00% 0.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemRead 3 75.00% 75.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::MemWrite 1 25.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::IprAccess 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:fu_full::InstPrefetch 0 0.00% 100.00% # attempts to use FU when none available
+system.cpu.iq.ISSUE:issued_per_cycle::samples 15845 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::mean 0.789145 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::stdev 0.977935 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::underflows 0 0.00% 0.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::0 8160 51.50% 51.50% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::1 4079 25.74% 77.24% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::2 2594 16.37% 93.61% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::3 834 5.26% 98.88% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::4 157 0.99% 99.87% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::5 19 0.12% 99.99% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::6 2 0.01% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::7 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::8 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::overflows 0 0.00% 100.00% # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::min_value 0 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::max_value 6 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:issued_per_cycle::total 15845 # Number of insts issued each cycle
+system.cpu.iq.ISSUE:rate 0.454146 # Inst issue rate
+system.cpu.iq.iqInstsAdded 13618 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 12504 # Number of instructions issued
+system.cpu.iq.iqNonSpecInstsAdded 17 # Number of non-speculative instructions added to the IQ
+system.cpu.iq.iqSquashedInstsExamined 3342 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedNonSpecRemoved 4 # Number of squashed non-spec instructions that were removed
+system.cpu.iq.iqSquashedOperandsExamined 5066 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadExReq_accesses 78 # number of ReadExReq accesses (hits+misses)
+system.cpu.l2cache.ReadExReq_avg_miss_latency 34493.589744 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 31326.923077 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_miss_latency 2690500 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_rate 1 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_misses 78 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 2443500 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_miss_rate 1 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_misses 78 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadReq_accesses 320 # number of ReadReq accesses (hits+misses)
+system.cpu.l2cache.ReadReq_avg_miss_latency 34188.679245 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 31004.716981 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_hits 2 # number of ReadReq hits
+system.cpu.l2cache.ReadReq_miss_latency 10872000 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.993750 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 318 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 9859500 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.993750 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 318 # number of ReadReq MSHR misses
+system.cpu.l2cache.avg_blocked_cycles::no_mshrs no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_blocked_cycles::no_targets no_value # average number of cycles each access was blocked
+system.cpu.l2cache.avg_refs 0.006309 # Average number of references to valid blocks.
+system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
+system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked
+system.cpu.l2cache.cache_copies 0 # number of cache copies performed
+system.cpu.l2cache.demand_accesses 398 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_avg_miss_latency 34248.737374 # average overall miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 31068.181818 # average overall mshr miss latency
+system.cpu.l2cache.demand_hits 2 # number of demand (read+write) hits
+system.cpu.l2cache.demand_miss_latency 13562500 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.994975 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 396 # number of demand (read+write) misses
+system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
+system.cpu.l2cache.demand_mshr_miss_latency 12303000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.994975 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 396 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.fast_writes 0 # number of fast writes performed
+system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
+system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
+system.cpu.l2cache.occ_%::0 0.004816 # Average percentage of cache occupancy
+system.cpu.l2cache.occ_blocks::0 157.820330 # Average occupied blocks per context
+system.cpu.l2cache.overall_accesses 398 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_avg_miss_latency 34248.737374 # average overall miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 31068.181818 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no_value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_hits 2 # number of overall hits
+system.cpu.l2cache.overall_miss_latency 13562500 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.994975 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 396 # number of overall misses
+system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
+system.cpu.l2cache.overall_mshr_miss_latency 12303000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.994975 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 396 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
+system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
+system.cpu.l2cache.replacements 0 # number of replacements
+system.cpu.l2cache.sampled_refs 317 # Sample count of references to valid blocks.
+system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
+system.cpu.l2cache.tagsinuse 157.820330 # Cycle average of tags in use
+system.cpu.l2cache.total_refs 2 # Total number of references to valid blocks.
+system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
+system.cpu.l2cache.writebacks 0 # number of writebacks
+system.cpu.memDep0.conflictingLoads 4 # Number of conflicting loads.
+system.cpu.memDep0.conflictingStores 0 # Number of conflicting stores.
+system.cpu.memDep0.insertedLoads 1535 # Number of loads inserted to the mem dependence unit.
+system.cpu.memDep0.insertedStores 1238 # Number of stores inserted to the mem dependence unit.
+system.cpu.numCycles 27533 # number of cpu cycles simulated
+system.cpu.rename.RENAME:BlockCycles 105 # Number of cycles rename is blocking
+system.cpu.rename.RENAME:CommittedMaps 9368 # Number of HB maps that are committed
+system.cpu.rename.RENAME:IQFullEvents 6 # Number of times rename has blocked due to IQ full
+system.cpu.rename.RENAME:IdleCycles 6603 # Number of cycles rename is idle
+system.cpu.rename.RENAME:LSQFullEvents 15 # Number of times rename has blocked due to LSQ full
+system.cpu.rename.RENAME:RenameLookups 38664 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 14745 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 13787 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 8027 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 721 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:UnblockCycles 108 # Number of cycles rename is unblocking
+system.cpu.rename.RENAME:UndoneMaps 4419 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 281 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:serializingInsts 20 # count of serializing insts renamed
+system.cpu.rename.RENAME:skidInsts 169 # count of insts added to the skid buffer
+system.cpu.rename.RENAME:tempSerializingInsts 17 # count of temporary serializing insts renamed
+system.cpu.timesIdled 208 # Number of times that the entire CPU went into an idle state and unscheduled itself
+system.cpu.workload.PROG:num_syscalls 11 # Number of system calls
+
+---------- End Simulation Statistics ----------
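The stats.txt above is plain "name value # description" text between the Begin/End banners, so the derived pipeline numbers can be cross-checked against the raw counters: sim_insts / system.cpu.numCycles = 9809 / 27533 ≈ 0.356263, matching system.cpu.ipc, and its reciprocal matches system.cpu.cpi (2.806912). The short parser below is an illustrative sketch, not part of the test harness; the 'stats.txt' path is a placeholder.

    # Sketch: read an M5 stats.txt dump and cross-check IPC/CPI against the
    # raw instruction and cycle counters.
    def parse_stats(path):
        """Return {stat name: float value}, skipping banners and non-numeric fields."""
        stats = {}
        with open(path) as f:
            for line in f:
                fields = line.split()
                if len(fields) < 2 or line.startswith('-'):
                    continue                  # blank lines and Begin/End banners
                try:
                    stats[fields[0]] = float(fields[1])
                except ValueError:
                    pass                      # e.g. 'no_value' entries
        return stats

    s = parse_stats('stats.txt')
    ipc = s['sim_insts'] / s['system.cpu.numCycles']
    print(ipc)                                # ~0.356263, matches system.cpu.ipc
    print(s['system.cpu.cpi'])                # 2.806912, i.e. 1 / ipc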